-rw-r--r--Documentation/i2c/muxes/gpio-i2cmux65
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/networking/dccp.txt1
-rw-r--r--Documentation/powerpc/booting-without-of.txt4
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/cpm.txt52
-rw-r--r--MAINTAINERS95
-rw-r--r--arch/arm/mach-dove/common.c4
-rw-r--r--arch/arm/mach-tegra/include/mach/sdhci.h29
-rw-r--r--arch/blackfin/Makefile9
-rw-r--r--arch/blackfin/boot/Makefile2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT-SMP_defconfig113
-rw-r--r--arch/blackfin/configs/DNP5370_defconfig121
-rw-r--r--arch/blackfin/include/asm/bfin_dma.h91
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h275
-rw-r--r--arch/blackfin/include/asm/bitops.h2
-rw-r--r--arch/blackfin/include/asm/cache.h2
-rw-r--r--arch/blackfin/include/asm/cacheflush.h3
-rw-r--r--arch/blackfin/include/asm/dma.h37
-rw-r--r--arch/blackfin/include/asm/dpmc.h2
-rw-r--r--arch/blackfin/include/asm/io.h268
-rw-r--r--arch/blackfin/include/asm/irqflags.h3
-rw-r--r--arch/blackfin/include/asm/processor.h4
-rw-r--r--arch/blackfin/include/asm/spinlock.h28
-rw-r--r--arch/blackfin/include/mach-common/pll.h86
-rw-r--r--arch/blackfin/include/mach-common/ports-a.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-b.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-c.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-d.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-e.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-f.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-g.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-h.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-i.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-j.h25
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/kgdb.c26
-rw-r--r--arch/blackfin/kernel/kgdb_test.c4
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c29
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c12
-rw-r--r--arch/blackfin/mach-bf518/dma.c2
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf518/include/mach/blackfin.h68
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF512.h1038
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF514.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF516.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF518.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h1061
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF512.h1388
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF51x_base.h1495
-rw-r--r--arch/blackfin/mach-bf518/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf518/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c18
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c14
-rw-r--r--arch/blackfin/mach-bf527/dma.c2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf527/include/mach/blackfin.h48
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF522.h1092
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF525.h7
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF527.h7
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h1113
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF522.h1393
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF527.h2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF52x_base.h1506
-rw-r--r--arch/blackfin/mach-bf527/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf527/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c25
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c10
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c11
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c30
-rw-r--r--arch/blackfin/mach-bf533/dma.c2
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h74
-rw-r--r--arch/blackfin/mach-bf533/include/mach/blackfin.h30
-rw-r--r--arch/blackfin/mach-bf533/include/mach/cdefBF532.h63
-rw-r--r--arch/blackfin/mach-bf533/include/mach/defBF532.h128
-rw-r--r--arch/blackfin/mach-bf533/include/mach/fio_flag.h55
-rw-r--r--arch/blackfin/mach-bf533/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf533/include/mach/pll.h58
-rw-r--r--arch/blackfin/mach-bf537/boards/Kconfig6
-rw-r--r--arch/blackfin/mach-bf537/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c418
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c4
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c18
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c12
-rw-r--r--arch/blackfin/mach-bf537/dma.c2
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h72
-rw-r--r--arch/blackfin/mach-bf537/include/mach/blackfin.h38
-rw-r--r--arch/blackfin/mach-bf537/include/mach/cdefBF534.h27
-rw-r--r--arch/blackfin/mach-bf537/include/mach/cdefBF537.h5
-rw-r--r--arch/blackfin/mach-bf537/include/mach/defBF534.h111
-rw-r--r--arch/blackfin/mach-bf537/include/mach/defBF537.h5
-rw-r--r--arch/blackfin/mach-bf537/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf537/include/mach/pll.h58
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c20
-rw-r--r--arch/blackfin/mach-bf538/dma.c18
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf538/include/mach/blackfin.h37
-rw-r--r--arch/blackfin/mach-bf538/include/mach/cdefBF538.h504
-rw-r--r--arch/blackfin/mach-bf538/include/mach/cdefBF539.h8
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF538.h1825
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF539.h2095
-rw-r--r--arch/blackfin/mach-bf538/include/mach/gpio.h5
-rw-r--r--arch/blackfin/mach-bf538/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c23
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c23
-rw-r--r--arch/blackfin/mach-bf548/dma.c2
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial.h16
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h60
-rw-r--r--arch/blackfin/mach-bf548/include/mach/blackfin.h70
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF542.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF544.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF547.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF548.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF549.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h23
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF542.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF544.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF548.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF549.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF54x_base.h321
-rw-r--r--arch/blackfin/mach-bf548/include/mach/gpio.h11
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h22
-rw-r--r--arch/blackfin/mach-bf548/include/mach/pll.h70
-rw-r--r--arch/blackfin/mach-bf561/atomic.S5
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c2
-rw-r--r--arch/blackfin/mach-bf561/dma.c18
-rw-r--r--arch/blackfin/mach-bf561/hotplug.c2
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h6
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h74
-rw-r--r--arch/blackfin/mach-bf561/include/mach/blackfin.h26
-rw-r--r--arch/blackfin/mach-bf561/include/mach/cdefBF561.h503
-rw-r--r--arch/blackfin/mach-bf561/include/mach/defBF561.h408
-rw-r--r--arch/blackfin/mach-bf561/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf561/include/mach/mem_map.h16
-rw-r--r--arch/blackfin/mach-bf561/include/mach/pll.h83
-rw-r--r--arch/blackfin/mach-bf561/include/mach/smp.h10
-rw-r--r--arch/blackfin/mach-bf561/smp.c29
-rw-r--r--arch/blackfin/mach-common/entry.S4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c1
-rw-r--r--arch/blackfin/mach-common/pm.c10
-rw-r--r--arch/blackfin/mach-common/smp.c204
-rw-r--r--arch/blackfin/mm/sram-alloc.c18
-rw-r--r--arch/microblaze/Kconfig.debug4
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/configs/mmu_defconfig1
-rw-r--r--arch/microblaze/include/asm/pvr.h185
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c1
-rw-r--r--arch/microblaze/kernel/entry.S46
-rw-r--r--arch/microblaze/kernel/exceptions.c3
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S9
-rw-r--r--arch/microblaze/kernel/prom.c4
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S16
-rw-r--r--arch/microblaze/lib/memmove.c2
-rw-r--r--arch/microblaze/lib/muldi3.S121
-rw-r--r--arch/microblaze/lib/muldi3.c60
-rw-r--r--arch/powerpc/Kconfig16
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts31
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc8308_p1m.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8308rdb.dts8
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig5
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig3
-rw-r--r--arch/powerpc/include/asm/bitops.h9
-rw-r--r--arch/powerpc/include/asm/cputable.h9
-rw-r--r--arch/powerpc/include/asm/cputhreads.h15
-rw-r--r--arch/powerpc/include/asm/device.h6
-rw-r--r--arch/powerpc/include/asm/firmware.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h4
-rw-r--r--arch/powerpc/include/asm/lppaca.h5
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/include/asm/mmzone.h5
-rw-r--r--arch/powerpc/include/asm/nvram.h52
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h8
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/topology.h15
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h2
-rw-r--r--arch/powerpc/kernel/Makefile9
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cputable.c22
-rw-r--r--arch/powerpc/kernel/crash_dump.c33
-rw-r--r--arch/powerpc/kernel/dma-iommu.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S1
-rw-r--r--arch/powerpc/kernel/fpu.S1
-rw-r--r--arch/powerpc/kernel/head_40x.S1
-rw-r--r--arch/powerpc/kernel/head_44x.S1
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/head_8xx.S1
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/iommu.c14
-rw-r--r--arch/powerpc/kernel/misc.S5
-rw-r--r--arch/powerpc/kernel/misc_32.S1
-rw-r--r--arch/powerpc/kernel/misc_64.S1
-rw-r--r--arch/powerpc/kernel/nvram_64.c481
-rw-r--r--arch/powerpc/kernel/pci_64.c3
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c7
-rw-r--r--arch/powerpc/kernel/ppc_save_regs.S1
-rw-r--r--arch/powerpc/kernel/ptrace.c22
-rw-r--r--arch/powerpc/kernel/ptrace32.c7
-rw-r--r--arch/powerpc/kernel/rtas.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c19
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/vector.S1
-rw-r--r--arch/powerpc/kernel/vio.c15
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/hweight_64.S110
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c12
-rw-r--r--arch/powerpc/mm/numa.c311
-rw-r--r--arch/powerpc/mm/pgtable_32.c3
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/platforms/44x/Makefile5
-rw-r--r--arch/powerpc/platforms/Kconfig7
-rw-r--r--arch/powerpc/platforms/cell/beat_iommu.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/lscsa_alloc.c3
-rw-r--r--arch/powerpc/platforms/chrp/time.c4
-rw-r--r--arch/powerpc/platforms/iseries/mf.c62
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c19
-rw-r--r--arch/powerpc/platforms/powermac/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig10
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c1
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c49
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c205
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c326
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c9
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c75
-rw-r--r--arch/powerpc/sysdev/ppc4xx_cpm.c346
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c8
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c16
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c2
-rw-r--r--arch/sh/boards/mach-migor/setup.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c25
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c24
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c4
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c4
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/x86/include/asm/acpi.h11
-rw-r--r--arch/x86/include/asm/amd_nb.h13
-rw-r--r--arch/x86/include/asm/fixmap.h4
-rw-r--r--arch/x86/include/asm/gpio.h5
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/mach_traps.h12
-rw-r--r--arch/x86/include/asm/nmi.h20
-rw-r--r--arch/x86/include/asm/numa_64.h2
-rw-r--r--arch/x86/include/asm/perf_event_p4.h3
-rw-r--r--arch/x86/kernel/amd_nb.c7
-rw-r--r--arch/x86/kernel/aperture_64.c44
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c28
-rw-r--r--arch/x86/kernel/dumpstack.c6
-rw-r--r--arch/x86/kernel/entry_64.S36
-rw-r--r--arch/x86/kernel/kgdb.c7
-rw-r--r--arch/x86/kernel/reboot.c5
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kernel/traps.c102
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/mm/amdtopology_64.c86
-rw-r--r--arch/x86/mm/numa_64.c157
-rw-r--r--arch/x86/mm/srat_64.c26
-rw-r--r--arch/x86/oprofile/nmi_int.c3
-rw-r--r--arch/x86/oprofile/nmi_timer_int.c2
-rw-r--r--arch/x86/pci/amd_bus.c33
-rw-r--r--drivers/atm/ambassador.c19
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/agp/compat_ioctl.c1
-rw-r--r--drivers/char/agp/compat_ioctl.h1
-rw-r--r--drivers/char/agp/frontend.c8
-rw-r--r--drivers/char/agp/generic.c27
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/agp/intel-agp.h14
-rw-r--r--drivers/char/agp/intel-gtt.c778
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/mpc512x_dma.c187
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c18
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c61
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c566
-rw-r--r--drivers/gpu/drm/drm_mm.c40
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c471
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c794
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c80
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h605
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3764
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c125
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c1343
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c139
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c724
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h193
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c104
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h91
-rw-r--r--drivers/gpu/drm/i915/intel_display.c999
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h22
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c32
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c43
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c8
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c116
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c52
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1007
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h136
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c7
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/Makefile17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c102
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c319
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c383
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c207
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h425
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c189
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c117
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c171
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1210
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c426
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c271
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c754
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c300
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.c69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c102
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c240
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c645
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c124
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c244
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c205
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c27
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c422
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c345
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c114
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c198
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c677
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c375
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c180
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c190
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c269
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c365
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c705
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h64
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2874
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c317
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h3
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h48
-rw-r--r--drivers/gpu/drm/radeon/atom.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios.h997
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c57
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c806
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c92
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h53
-rw-r--r--drivers/gpu/drm/radeon/ni.c316
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h86
-rw-r--r--drivers/gpu/drm/radeon/nid.h41
-rw-r--r--drivers/gpu/drm/radeon/r100.c78
-rw-r--r--drivers/gpu/drm/radeon/r100d.h2
-rw-r--r--drivers/gpu/drm/radeon/r300.c21
-rw-r--r--drivers/gpu/drm/radeon/r300d.h1
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/r600.c357
-rw-r--r--drivers/gpu/drm/radeon/r600d.h48
-rw-r--r--drivers/gpu/drm/radeon/radeon.h151
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c153
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h65
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1246
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c391
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c205
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h13
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c9
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv51516
-rw-r--r--drivers/gpu/drm/radeon/rs600.c118
-rw-r--r--drivers/gpu/drm/radeon/rv770.c196
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h47
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c156
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c138
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c169
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c78
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c31
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/i2c-core.c29
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/gpio-i2cmux.c184
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c56
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c32
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c155
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c373
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c80
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c45
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c57
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h11
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c51
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c62
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c392
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h46
-rw-r--r--drivers/macintosh/macio_asic.c7
-rw-r--r--drivers/macintosh/therm_pm72.c30
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c6
-rw-r--r--drivers/mmc/card/Kconfig1
-rw-r--r--drivers/mmc/core/Kconfig11
-rw-r--r--drivers/mmc/core/bus.c8
-rw-r--r--drivers/mmc/core/core.c206
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c206
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c91
-rw-r--r--drivers/mmc/core/mmc_ops.c101
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c16
-rw-r--r--drivers/mmc/core/sdio.c36
-rw-r--r--drivers/mmc/core/sdio_bus.c32
-rw-r--r--drivers/mmc/host/Kconfig37
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/davinci_mmc.c80
-rw-r--r--drivers/mmc/host/dw_mmc.c1796
-rw-r--r--drivers/mmc/host/dw_mmc.h168
-rw-r--r--drivers/mmc/host/mxcmmc.c53
-rw-r--r--drivers/mmc/host/sdhci-dove.c70
-rw-r--r--drivers/mmc/host/sdhci-pci.c161
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c66
-rw-r--r--drivers/mmc/host/sdhci-tegra.c257
-rw-r--r--drivers/mmc/host/sdhci.c45
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/tmio_mmc.c561
-rw-r--r--drivers/mmc/host/tmio_mmc.h228
-rw-r--r--drivers/net/Kconfig9
-rw-r--r--drivers/net/bfin_mac.c74
-rw-r--r--drivers/net/bfin_mac.h11
-rw-r--r--drivers/net/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h988
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c22
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h220
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c70
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h74
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c5
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c11
-rw-r--r--drivers/net/e1000/e1000_hw.c328
-rw-r--r--drivers/net/e1000/e1000_hw.h59
-rw-r--r--drivers/net/e1000/e1000_main.c35
-rw-r--r--drivers/net/e1000/e1000_osdep.h19
-rw-r--r--drivers/net/e1000e/82571.c77
-rw-r--r--drivers/net/e1000e/e1000.h3
-rw-r--r--drivers/net/e1000e/es2lan.c4
-rw-r--r--drivers/net/e1000e/ethtool.c54
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c77
-rw-r--r--drivers/net/e1000e/lib.c3
-rw-r--r--drivers/net/e1000e/netdev.c53
-rw-r--r--drivers/net/e1000e/phy.c40
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/fec.c248
-rw-r--r--drivers/net/fec.h5
-rw-r--r--drivers/net/forcedeth.c34
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/irda/bfin_sir.h2
-rw-r--r--drivers/net/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c749
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c142
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c169
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h91
-rw-r--r--drivers/net/mlx4/alloc.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_deflate.c9
-rw-r--r--drivers/net/ppp_generic.c9
-rw-r--r--drivers/net/ppp_mppe.c7
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/qlcnic/qlcnic.h24
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c63
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/r8169.c143
-rw-r--r--drivers/net/sky2.c143
-rw-r--r--drivers/net/sky2.h6
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ps3/Makefile2
-rw-r--r--drivers/rtc/class.c13
-rw-r--r--drivers/rtc/interface.c574
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-dev.c104
-rw-r--r--drivers/rtc/rtc-ds1307.c12
-rw-r--r--drivers/rtc/rtc-lib.c28
-rw-r--r--drivers/serial/Kconfig70
-rw-r--r--drivers/serial/bfin_5xx.c638
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--fs/9p/Kconfig5
-rw-r--r--fs/9p/Makefile1
-rw-r--r--fs/9p/acl.c4
-rw-r--r--fs/9p/v9fs.h42
-rw-r--r--fs/9p/vfs_inode.c871
-rw-r--r--fs/9p/vfs_inode_dotl.c824
-rw-r--r--fs/9p/xattr.c2
-rw-r--r--fs/ext2/dir.c19
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/super.c25
-rw-r--r--fs/ext2/xattr.c10
-rw-r--r--fs/ext3/balloc.c266
-rw-r--r--fs/ext3/dir.c15
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/ioctl.c22
-rw-r--r--fs/ext3/namei.c138
-rw-r--r--fs/ext3/resize.c65
-rw-r--r--fs/ext3/super.c64
-rw-r--r--fs/ext3/xattr.c2
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/dir.c56
-rw-r--r--fs/ext4/ext4.h93
-rw-r--r--fs/ext4/ext4_extents.h8
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c88
-rw-r--r--fs/ext4/file.c22
-rw-r--r--fs/ext4/fsync.c4
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inode.c74
-rw-r--r--fs/ext4/mballoc.c55
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/namei.c69
-rw-r--r--fs/ext4/page-io.c7
-rw-r--r--fs/ext4/resize.c64
-rw-r--r--fs/ext4/super.c288
-rw-r--r--fs/ext4/xattr.c28
-rw-r--r--fs/jbd2/journal.c34
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/lockd/Makefile6
-rw-r--r--fs/lockd/clnt4xdr.c605
-rw-r--r--fs/lockd/clntlock.c4
-rw-r--r--fs/lockd/clntproc.c18
-rw-r--r--fs/lockd/clntxdr.c627
-rw-r--r--fs/lockd/host.c409
-rw-r--r--fs/lockd/mon.c110
-rw-r--r--fs/lockd/svc4proc.c20
-rw-r--r--fs/lockd/svclock.c34
-rw-r--r--fs/lockd/svcproc.c28
-rw-r--r--fs/lockd/xdr.c287
-rw-r--r--fs/lockd/xdr4.c255
-rw-r--r--fs/mbcache.c12
-rw-r--r--fs/nfs/callback.c83
-rw-r--r--fs/nfs/callback.h59
-rw-r--r--fs/nfs/callback_proc.c326
-rw-r--r--fs/nfs/callback_xdr.c143
-rw-r--r--fs/nfs/client.c302
-rw-r--r--fs/nfs/delegation.c362
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c72
-rw-r--r--fs/nfs/idmap.c2
-rw-r--r--fs/nfs/inode.c3
-rw-r--r--fs/nfs/internal.h19
-rw-r--r--fs/nfs/mount_clnt.c83
-rw-r--r--fs/nfs/nfs2xdr.c1294
-rw-r--r--fs/nfs/nfs3xdr.c2817
-rw-r--r--fs/nfs/nfs4_fs.h13
-rw-r--r--fs/nfs/nfs4filelayout.c6
-rw-r--r--fs/nfs/nfs4proc.c188
-rw-r--r--fs/nfs/nfs4renewd.c11
-rw-r--r--fs/nfs/nfs4state.c293
-rw-r--r--fs/nfs/nfs4xdr.c1426
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c524
-rw-r--r--fs/nfs/pnfs.h76
-rw-r--r--fs/nfs/proc.c5
-rw-r--r--fs/nfs/super.c18
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfsd/nfs4callback.c690
-rw-r--r--fs/ocfs2/Kconfig2
-rw-r--r--fs/ocfs2/alloc.c77
-rw-r--r--fs/ocfs2/alloc.h4
-rw-r--r--fs/ocfs2/aops.c59
-rw-r--r--fs/ocfs2/cluster/heartbeat.c246
-rw-r--r--fs/ocfs2/cluster/netdebug.c286
-rw-r--r--fs/ocfs2/cluster/tcp.c145
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h33
-rw-r--r--fs/ocfs2/dlm/dlmast.c76
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h86
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c200
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h5
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c10
-rw-r--r--fs/ocfs2/dlm/dlmlock.c3
-rw-r--r--fs/ocfs2/dlm/dlmthread.c132
-rw-r--r--fs/ocfs2/namei.c5
-rw-r--r--fs/ocfs2/ocfs2.h5
-rw-r--r--fs/quota/dquot.c18
-rw-r--r--fs/quota/quota_tree.c9
-rw-r--r--fs/udf/Kconfig1
-rw-r--r--fs/udf/balloc.c3
-rw-r--r--fs/udf/dir.c5
-rw-r--r--fs/udf/file.c11
-rw-r--r--fs/udf/ialloc.c21
-rw-r--r--fs/udf/inode.c51
-rw-r--r--fs/udf/namei.c107
-rw-r--r--fs/udf/partition.c27
-rw-r--r--fs/udf/super.c67
-rw-r--r--fs/udf/symlink.c12
-rw-r--r--fs/udf/udf_i.h13
-rw-r--r--fs/udf/udf_sb.h22
-rw-r--r--fs/udf/udfdecl.h4
-rw-r--r--fs/xfs/linux-2.6/sv.h59
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c425
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h16
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c235
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h22
-rw-r--r--fs/xfs/linux-2.6/xfs_export.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c22
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c92
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h59
-rw-r--r--fs/xfs/quota/xfs_dquot.c1
-rw-r--r--fs/xfs/xfs_ag.h2
-rw-r--r--fs/xfs/xfs_alloc.c351
-rw-r--r--fs/xfs/xfs_attr_leaf.c4
-rw-r--r--fs/xfs/xfs_btree.c9
-rw-r--r--fs/xfs/xfs_buf_item.c32
-rw-r--r--fs/xfs/xfs_buf_item.h11
-rw-r--r--fs/xfs/xfs_extfree_item.c97
-rw-r--r--fs/xfs/xfs_extfree_item.h11
-rw-r--r--fs/xfs/xfs_fsops.c1
-rw-r--r--fs/xfs/xfs_iget.c79
-rw-r--r--fs/xfs/xfs_inode.c54
-rw-r--r--fs/xfs/xfs_inode.h15
-rw-r--r--fs/xfs/xfs_inode_item.c90
-rw-r--r--fs/xfs/xfs_iomap.c233
-rw-r--r--fs/xfs/xfs_iomap.h27
-rw-r--r--fs/xfs/xfs_log.c739
-rw-r--r--fs/xfs/xfs_log_cil.c17
-rw-r--r--fs/xfs/xfs_log_priv.h127
-rw-r--r--fs/xfs/xfs_log_recover.c620
-rw-r--r--fs/xfs/xfs_mount.c23
-rw-r--r--fs/xfs/xfs_mount.h14
-rw-r--r--fs/xfs/xfs_trans.c79
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_ail.c232
-rw-r--r--fs/xfs/xfs_trans_extfree.c8
-rw-r--r--fs/xfs/xfs_trans_priv.h35
-rw-r--r--fs/xfs/xfs_vnodeops.c61
-rw-r--r--include/asm-generic/io.h30
-rw-r--r--include/drm/drmP.h102
-rw-r--r--include/drm/drm_crtc.h9
-rw-r--r--include/drm/drm_fb_helper.h3
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/drm_pciids.h40
-rw-r--r--include/drm/i915_drm.h12
-rw-r--r--include/drm/intel-gtt.h35
-rw-r--r--include/drm/nouveau_drm.h5
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h50
-rw-r--r--include/drm/ttm/ttm_bo_driver.h152
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h11
-rw-r--r--include/linux/agp_backend.h2
-rw-r--r--include/linux/bfin_mac.h1
-rw-r--r--include/linux/dynamic_debug.h18
-rw-r--r--include/linux/etherdevice.h4
-rw-r--r--include/linux/ext3_fs.h10
-rw-r--r--include/linux/fec.h3
-rw-r--r--include/linux/gpio-i2cmux.h38
-rw-r--r--include/linux/i2c.h27
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/intel-gtt.h20
-rw-r--r--include/linux/jbd2.h20
-rw-r--r--include/linux/kref.h2
-rw-r--r--include/linux/lockd/debug.h10
-rw-r--r--include/linux/lockd/lockd.h6
-rw-r--r--include/linux/mbcache.h11
-rw-r--r--include/linux/mfd/tmio.h5
-rw-r--r--include/linux/mmc/dw_mmc.h217
-rw-r--r--include/linux/mmc/host.h19
-rw-r--r--include/linux/mmc/mmc.h2
-rw-r--r--include/linux/mmc/sdhci.h6
-rw-r--r--include/linux/netdevice.h24
-rw-r--r--include/linux/netfilter/x_tables.h10
-rw-r--r--include/linux/nfs3.h3
-rw-r--r--include/linux/nfs4.h8
-rw-r--r--include/linux/nfs_fs_sb.h15
-rw-r--r--include/linux/nfs_xdr.h6
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/quotaops.h5
-rw-r--r--include/linux/rtc.h51
-rw-r--r--include/linux/sunrpc/auth.h8
-rw-r--r--include/linux/sunrpc/bc_xprt.h15
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xdr.h14
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/vga_switcheroo.h2
-rw-r--r--include/net/ah.h2
-rw-r--r--include/net/arp.h1
-rw-r--r--include/net/phonet/phonet.h4
-rw-r--r--include/net/sch_generic.h20
-rw-r--r--include/net/sock.h4
-rw-r--r--include/trace/define_trace.h10
-rw-r--r--include/trace/events/skb.h4
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/exit.c14
-rw-r--r--kernel/perf_event.c82
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--lib/dynamic_debug.c9
-rw-r--r--lib/kref.c30
-rw-r--r--net/9p/protocol.c22
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/caif/chnl_net.c18
-rw-r--r--net/core/dev.c149
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/dccp/dccp.h3
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/sysctl.c4
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ipv4/ah4.c7
-rw-r--r--net/ipv4/arp.c29
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c45
-rw-r--r--net/ipv4/netfilter/ip_tables.c45
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c45
-rw-r--r--net/netfilter/nf_conntrack_netlink.c18
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/phonet/af_phonet.c6
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_ipt.c3
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_nat.c3
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c3
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c3
-rw-r--r--net/sched/sch_atm.c6
-rw-r--r--net/sched/sch_cbq.c6
-rw-r--r--net/sched/sch_drr.c8
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c17
-rw-r--r--net/sched/sch_ingress.c3
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c6
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_red.c3
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sunrpc/auth.c28
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c44
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/clnt.c21
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/rpcb_clnt.c147
-rw-r--r--net/sunrpc/svc.c36
-rw-r--r--net/sunrpc/svcsock.c106
-rw-r--r--net/sunrpc/xdr.c155
-rw-r--r--net/xfrm/xfrm_user.c6
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-record.c3
-rw-r--r--tools/perf/builtin-sched.c5
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/builtin-test.c116
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/util/evsel.c87
-rw-r--r--tools/perf/util/evsel.h2
-rw-r--r--tools/perf/util/parse-events.c74
-rw-r--r--tools/perf/util/session.c2
885 files changed, 59306 insertions, 34521 deletions
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/gpio-i2cmux
new file mode 100644
index 00000000000..811cd78d4cd
--- /dev/null
+++ b/Documentation/i2c/muxes/gpio-i2cmux
@@ -0,0 +1,65 @@
+Kernel driver gpio-i2cmux
+
+Author: Peter Korsgaard <peter.korsgaard@barco.com>
+
+Description
+-----------
+
+gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+from a master I2C bus and a hardware MUX controlled through GPIO pins.
+
+E.g.:
+
+  ----------               ----------   Bus segment 1   - - - - -
+ |          | SCL/SDA     |          |----------------  |      |
+ |          |-------------|          |
+ |          |             |          |  Bus segment 2   |      |
+ |  Linux   | GPIO 1..N   |   MUX    |----------------    Devices
+ |          |-------------|          |                  |      |
+ |          |             |          |  Bus segment M
+ |          |             |          |----------------  |      |
+  ----------               ----------                    - - - - -
+
+SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
+according to the settings of the GPIO pins 1..N.
+
+Usage
+-----
+
+gpio-i2cmux uses the platform bus, so you need to provide a struct
+platform_device whose platform_data points to a struct
+gpio_i2cmux_platform_data describing the I2C adapter number of the
+master bus, the number of bus segments to create, and the GPIO pins
+used to control the MUX. See include/linux/gpio-i2cmux.h for details.
+
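+For reference, a minimal sketch of the platform data structure that the
+example below fills in. The field names are inferred from that example;
+the authoritative definition lives in include/linux/gpio-i2cmux.h, so
+treat the exact types here as an assumption:
+
+struct gpio_i2cmux_platform_data {
+	int parent;		/* number of the parent (master) I2C adapter */
+	int base_nr;		/* optional: first adapter number to assign */
+	const unsigned *values;	/* GPIO settings, one per bus segment */
+	int n_values;		/* number of bus segments */
+	const unsigned *gpios;	/* GPIOs driving the MUX select lines */
+	int n_gpios;		/* number of GPIOs */
+	unsigned idle;		/* optional: MUX setting when the bus is idle */
+};
+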
+E.g., something like this for a MUX providing 4 bus segments
+controlled through 3 GPIO pins:
+
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+
+static const unsigned myboard_gpiomux_gpios[] = {
+ AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
+};
+
+static const unsigned myboard_gpiomux_values[] = {
+ 0, 1, 2, 3
+};
+
+static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
+ .parent = 1,
+ .base_nr = 2, /* optional */
+ .values = myboard_gpiomux_values,
+ .n_values = ARRAY_SIZE(myboard_gpiomux_values),
+ .gpios = myboard_gpiomux_gpios,
+ .n_gpios = ARRAY_SIZE(myboard_gpiomux_gpios),
+ .idle = 4, /* optional */
+};
+
+static struct platform_device myboard_i2cmux = {
+ .name = "gpio-i2cmux",
+ .id = 0,
+ .dev = {
+ .platform_data = &myboard_i2cmux_data,
+ },
+};
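+
+The device is then registered from board init code like any other
+platform device. A minimal sketch (the function name is made up for
+illustration; error handling omitted):
+
+static int __init myboard_i2cmux_init(void)
+{
+	return platform_device_register(&myboard_i2cmux);
+}
+device_initcall(myboard_i2cmux_init);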
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f3dc951e949..ed3708f8d0d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -403,6 +403,10 @@ and is between 256 and 4096 characters. It is defined in the file
bttv.pll= See Documentation/video4linux/bttv/Insmod-options
bttv.tuner= and Documentation/video4linux/bttv/CARDLIST
+ bulk_remove=off [PPC] This parameter disables the use of the pSeries
+ firmware feature for flushing multiple hpte entries
+ at a time.
+
c101= [NET] Moxa C101 synchronous serial card
cachesize= [BUGS=X86-32] Override level 2 CPU cache size detection.
@@ -1490,6 +1494,10 @@ and is between 256 and 4096 characters. It is defined in the file
mtdparts= [MTD]
See drivers/mtd/cmdlinepart.c.
+ multitce=off [PPC] This parameter disables the use of the pSeries
+ firmware feature for updating multiple TCE entries
+ at a time.
+
onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration
Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6a49f..811872b45be 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -167,6 +167,7 @@ rx_ccid = 2
seq_window = 100
The initial sequence window (sec. 7.5.2) of the sender. This influences
the local ackno validity and the remote seqno validity windows (7.5.1).
+ Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
tx_qlen = 5
The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 302db5da49b..3272ed59dec 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -131,7 +131,7 @@ order to avoid the degeneration that had become the ppc32 kernel entry
point and the way a new platform should be added to the kernel. The
legacy iSeries platform breaks those rules as it predates this scheme,
but no new board support will be accepted in the main tree that
-doesn't follows them properly. In addition, since the advent of the
+doesn't follow them properly. In addition, since the advent of the
arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
platforms and 32-bit platforms which move into arch/powerpc will be
required to use these rules as well.
@@ -1025,7 +1025,7 @@ dtc source code can be found at
WARNING: This version is still in early development stage; the
resulting device-tree "blobs" have not yet been validated with the
-kernel. The current generated bloc lacks a useful reserve map (it will
+kernel. The current generated block lacks a useful reserve map (it will
be fixed to generate an empty one, it's up to the bootloader to fill
it up) among others. The error handling needs work, bugs are lurking,
etc...
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
new file mode 100644
index 00000000000..ee459806d35
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
@@ -0,0 +1,52 @@
+PPC4xx Clock Power Management (CPM) node
+
+Required properties:
+ - compatible : compatible list, currently only "ibm,cpm"
+ - dcr-access-method : "native"
+ - dcr-reg : < DCR register range >
+
+Optional properties:
+ - er-offset : All 4xx SoCs with a CPM controller have
+ one of two different orders for the CPM
+ registers. Some have the CPM registers
+ in the following order (ER,FR,SR). The
+ others have them in the following order
+ (SR,ER,FR). For the second case set
+ er-offset = <1>.
+ - unused-units : specifier consists of one cell. For each
+ bit in the cell, the corresponding bit
+ in CPM will be set to turn off unused
+ devices.
+ - idle-doze : specifier consists of one cell. For each
+ bit in the cell, the corresponding bit
+ in CPM will be set to turn off unused
+ devices. This is usually just CPM[CPU].
+ - standby : specifier consists of one cell. For each
+ bit in the cell, the corresponding bit
+ in CPM will be set on standby and
+ restored on resume.
+ - suspend : specifier consists of one cell. For each
+ bit in the cell, the corresponding bit
+ in CPM will be set on suspend (mem) and
+ restored on resume. Note that for standby
+ and suspend the corresponding bits can
+ be different or the same. Usually for
+ standby only class 2 and 3 units are set.
+ However, the interface does not care.
+ If they are the same, any additional
+ power saving comes from putting the DDR
+ into self-refresh mode (where supported)
+ and from any other power-saving
+ techniques the specific SoC provides.
+
+Example:
+ CPM0: cpm {
+ compatible = "ibm,cpm";
+ dcr-access-method = "native";
+ dcr-reg = <0x160 0x003>;
+ er-offset = <0>;
+ unused-units = <0x00000100>;
+ idle-doze = <0x02000000>;
+ standby = <0xfeff0000>;
+ suspend = <0xfeff791d>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index aca102f758b..42f991e5a85 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -285,6 +285,41 @@ L: linux-parisc@vger.kernel.org
S: Maintained
F: sound/pci/ad1889.*
+AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/AD5254
+S: Supported
+F: drivers/misc/ad525x_dpot.c
+
+AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/AD5398
+S: Supported
+F: drivers/regulator/ad5398.c
+
+AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/AD7142
+S: Supported
+F: drivers/input/misc/ad714x.c
+
+AD7877 TOUCHSCREEN DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/AD7877
+S: Supported
+F: drivers/input/touchscreen/ad7877.c
+
+AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/AD7879
+S: Supported
+F: drivers/input/touchscreen/ad7879.c
+
ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: lm-sensors@lm-sensors.org
@@ -304,6 +339,32 @@ W: http://linuxwireless.org/
S: Orphan
F: drivers/net/wireless/adm8211.*
+ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/ADP5520
+S: Supported
+F: drivers/mfd/adp5520.c
+F: drivers/video/backlight/adp5520_bl.c
+F: drivers/led/leds-adp5520.c
+F: drivers/gpio/adp5520-gpio.c
+F: drivers/input/keyboard/adp5520-keys.c
+
+ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/ADP5588
+S: Supported
+F: drivers/input/keyboard/adp5588-keys.c
+F: drivers/gpio/adp5588-gpio.c
+
+ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/ADP8860
+S: Supported
+F: drivers/video/backlight/adp8860_bl.c
+
ADT746X FAN DRIVER
M: Colin Leroy <colin@colino.net>
S: Maintained
@@ -316,6 +377,13 @@ S: Maintained
F: Documentation/hwmon/adt7475
F: drivers/hwmon/adt7475.c
+ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
+M: Michael Hennerich <michael.hennerich@analog.com>
+L: device-driver-devel@blackfin.uclinux.org
+W: http://wiki-analog.com/ADXL345
+S: Supported
+F: drivers/input/misc/adxl34x.c
+
ADVANSYS SCSI DRIVER
M: Matthew Wilcox <matthew@wil.cx>
L: linux-scsi@vger.kernel.org
@@ -440,17 +508,23 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
-ANALOG DEVICES INC ASOC DRIVERS
-L: uclinux-dist-devel@blackfin.uclinux.org
+ANALOG DEVICES INC ASOC CODEC DRIVERS
+L: device-driver-devel@blackfin.uclinux.org
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-W: http://blackfin.uclinux.org/
+W: http://wiki-analog.com/
S: Supported
-F: sound/soc/blackfin/*
F: sound/soc/codecs/ad1*
F: sound/soc/codecs/adau*
F: sound/soc/codecs/adav*
F: sound/soc/codecs/ssm*
+ANALOG DEVICES INC ASOC DRIVERS
+L: uclinux-dist-devel@blackfin.uclinux.org
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+W: http://blackfin.uclinux.org/
+S: Supported
+F: sound/soc/blackfin/*
+
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -1711,7 +1785,8 @@ S: Maintained
F: drivers/usb/atm/cxacru.c
CONFIGFS
-M: Joel Becker <joel.becker@oracle.com>
+M: Joel Becker <jlbec@evilplan.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
S: Supported
F: fs/configfs/
F: include/linux/configfs.h
@@ -2618,6 +2693,14 @@ S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/i2c-gpio.h
+GENERIC GPIO I2C MULTIPLEXER DRIVER
+M: Peter Korsgaard <peter.korsgaard@barco.com>
+L: linux-i2c@vger.kernel.org
+S: Supported
+F: drivers/i2c/muxes/gpio-i2cmux.c
+F: include/linux/gpio-i2cmux.h
+F: Documentation/i2c/muxes/gpio-i2cmux
+
GENERIC HDLC (WAN) DRIVERS
M: Krzysztof Halasa <khc@pm.waw.pl>
W: http://www.kernel.org/pub/linux/utils/net/hdlc/
@@ -4467,7 +4550,7 @@ F: include/linux/oprofile.h
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mfasheh@suse.com>
-M: Joel Becker <joel.becker@oracle.com>
+M: Joel Becker <jlbec@evilplan.org>
L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W: http://oss.oracle.com/projects/ocfs2/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f7a12586a1f..fe627aba6da 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -770,7 +770,7 @@ static struct resource dove_sdio0_resources[] = {
};
static struct platform_device dove_sdio0 = {
- .name = "sdhci-mv",
+ .name = "sdhci-dove",
.id = 0,
.dev = {
.dma_mask = &sdio_dmamask,
@@ -798,7 +798,7 @@ static struct resource dove_sdio1_resources[] = {
};
static struct platform_device dove_sdio1 = {
- .name = "sdhci-mv",
+ .name = "sdhci-dove",
.id = 1,
.dev = {
.dma_mask = &sdio_dmamask,
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
new file mode 100644
index 00000000000..3ad086e859c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-arm/arch-tegra/include/mach/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+ int cd_gpio;
+ int wp_gpio;
+ int power_gpio;
+ int is_8bit;
+};
+
+#endif
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 46738d49b7c..46f42b2066e 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -19,7 +19,7 @@ KBUILD_CFLAGS += -mlong-calls
endif
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_CFLAGS_MODULE += -mlong-calls
-KBUILD_LDFLAGS_MODULE += -m elf32bfin
+LDFLAGS += -m elf32bfin
KALLSYMS += --symbol-prefix=_
KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -97,8 +97,11 @@ rev-$(CONFIG_BF_REV_0_6) := 0.6
rev-$(CONFIG_BF_REV_NONE) := none
rev-$(CONFIG_BF_REV_ANY) := any
-KBUILD_CFLAGS += -mcpu=$(cpu-y)-$(rev-y)
-KBUILD_AFLAGS += -mcpu=$(cpu-y)-$(rev-y)
+CPU_REV := $(cpu-y)-$(rev-y)
+export CPU_REV
+
+KBUILD_CFLAGS += -mcpu=$(CPU_REV)
+KBUILD_AFLAGS += -mcpu=$(CPU_REV)
# - we utilize the silicon rev from the toolchain, so move it over to the checkflags
CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 13d2dbd658e..0a49279e342 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -17,7 +17,7 @@ UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
+ -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
$(UIMAGE_OPTS-y) -d $< $@
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
new file mode 100644
index 00000000000..4cf451024fd
--- /dev/null
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -0,0 +1,113 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BF561=y
+CONFIG_SMP=y
+CONFIG_IRQ_TIMER0=10
+CONFIG_CLKIN_HZ=30000000
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BFIN_GPTIMERS=m
+CONFIG_C_CDPRIO=y
+CONFIG_BANK_3=0xAAC2
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_PHYSMAP=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=m
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_BFIN_WDT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_JFFS2_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_SMB_FS=m
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HWERR=y
+CONFIG_EXACT_HWERR=y
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
new file mode 100644
index 00000000000..0ebc7d9aa42
--- /dev/null
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -0,0 +1,121 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="DNP5370"
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_SLOB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BF537=y
+CONFIG_BF_REV_0_3=y
+CONFIG_DNP5370=y
+CONFIG_IRQ_ERROR=7
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_C_CDPRIO=y
+CONFIG_C_AMBEN_B0_B1_B2=y
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_LLC2=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=1
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_NETDEVICES=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_BFIN_MAC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=y
+CONFIG_BFIN_JTAG_COMM_CONSOLE=y
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_UART0=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM75=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_KOBJECT=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_PAGE_POISONING=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/blackfin/include/asm/bfin_dma.h b/arch/blackfin/include/asm/bfin_dma.h
new file mode 100644
index 00000000000..d5112074414
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_dma.h
@@ -0,0 +1,91 @@
+/*
+ * bfin_dma.h - Blackfin DMA defines/structures/etc...
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_DMA_H__
+#define __ASM_BFIN_DMA_H__
+
+#include <linux/types.h>
+
+/* DMA_CONFIG Masks */
+#define DMAEN 0x0001 /* DMA Channel Enable */
+#define WNR 0x0002 /* Channel Direction (W/R*) */
+#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
+#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
+#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
+#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
+#define RESTART 0x0020 /* DMA Buffer Clear */
+#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
+#define DI_EN 0x0080 /* Data Interrupt Enable */
+#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
+#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
+#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
+#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
+#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
+#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
+#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
+#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
+#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
+#define NDSIZE 0x0f00 /* Next Descriptor Size */
+#define DMAFLOW 0x7000 /* Flow Control */
+#define DMAFLOW_STOP 0x0000 /* Stop Mode */
+#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
+#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
+#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
+
+/* DMA_IRQ_STATUS Masks */
+#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
+#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
+#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
+#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits. So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin dma registers layout
+ */
+struct bfin_dma_regs {
+ u32 next_desc_ptr;
+ u32 start_addr;
+ __BFP(config);
+ u32 __pad0;
+ __BFP(x_count);
+ __BFP(x_modify);
+ __BFP(y_count);
+ __BFP(y_modify);
+ u32 curr_desc_ptr;
+ u32 curr_addr;
+ __BFP(irq_status);
+ __BFP(peripheral_map);
+ __BFP(curr_x_count);
+ u32 __pad1;
+ __BFP(curr_y_count);
+ u32 __pad2;
+};
+
+/*
+ * bfin handshake mdma registers layout
+ */
+struct bfin_hmdma_regs {
+ __BFP(control);
+ __BFP(ecinit);
+ __BFP(bcinit);
+ __BFP(ecurgent);
+ __BFP(ecoverflow);
+ __BFP(ecount);
+ __BFP(bcount);
+};
+
+#undef __BFP
+
+#endif
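The __BFP() helper above is what keeps struct bfin_dma_regs congruent with the hardware map: every 16-bit MMR is followed by a 16-bit pad so each member lands on a 32-bit stride. A minimal sketch of driving a channel through this layout (the channel pointer is assumed to come from dma_io_base_addr[]; real code normally goes through the set_dma_*() helpers in asm/dma.h):

    static void start_autobuffer_rx(volatile struct bfin_dma_regs *ch,
                                    void *buf, unsigned short nwords)
    {
            ch->start_addr = (unsigned long)buf;
            ch->x_count = nwords;   /* 1D transfer: nwords elements */
            ch->x_modify = 2;       /* element stride in bytes (16-bit words) */
            /* write to memory (WNR), 16-bit words, autobuffer flow,
             * interrupt at the end of each buffer */
            ch->config = DMAEN | WNR | WDSIZE_16 | DI_EN | DMAFLOW_AUTO;
    }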
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
new file mode 100644
index 00000000000..1ff9f1468c0
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -0,0 +1,275 @@
+/*
+ * bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_ASM_SERIAL_H__
+#define __BFIN_ASM_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <mach/anomaly.h>
+#include <mach/bfin_serial.h>
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || \
+ defined(CONFIG_BFIN_UART1_CTSRTS) || \
+ defined(CONFIG_BFIN_UART2_CTSRTS) || \
+ defined(CONFIG_BFIN_UART3_CTSRTS)
+# ifdef BFIN_UART_BF54X_STYLE
+# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
+# else
+# define CONFIG_SERIAL_BFIN_CTSRTS
+# endif
+#endif
+
+struct circ_buf;
+struct timer_list;
+struct work_struct;
+
+struct bfin_serial_port {
+ struct uart_port port;
+ unsigned int old_status;
+ int status_irq;
+#ifndef BFIN_UART_BF54X_STYLE
+ unsigned int lsr;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ int tx_done;
+ int tx_count;
+ struct circ_buf rx_dma_buf;
+ struct timer_list rx_dma_timer;
+ int rx_dma_nrows;
+ unsigned int tx_dma_channel;
+ unsigned int rx_dma_channel;
+ struct work_struct tx_dma_workqueue;
+#elif ANOMALY_05000363
+ unsigned int anomaly_threshold;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ int scts;
+#endif
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ int cts_pin;
+ int rts_pin;
+#endif
+};
+
+/* UART_LCR Masks */
+#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
+#define STB 0x04 /* Stop Bits */
+#define PEN 0x08 /* Parity Enable */
+#define EPS 0x10 /* Even Parity Select */
+#define STP 0x20 /* Stick Parity */
+#define SB 0x40 /* Set Break */
+#define DLAB 0x80 /* Divisor Latch Access */
+
+/* UART_LSR Masks */
+#define DR 0x01 /* Data Ready */
+#define OE 0x02 /* Overrun Error */
+#define PE 0x04 /* Parity Error */
+#define FE 0x08 /* Framing Error */
+#define BI 0x10 /* Break Interrupt */
+#define THRE 0x20 /* THR Empty */
+#define TEMT 0x40 /* TSR and UART_THR Empty */
+#define TFI 0x80 /* Transmission Finished Indicator */
+
+/* UART_IER Masks */
+#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
+#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
+#define ELSI 0x04 /* Enable RX Status Interrupt */
+#define EDSSI 0x08 /* Enable Modem Status Interrupt */
+#define EDTPTI 0x10 /* Enable DMA Transmit PIRQ Interrupt */
+#define ETFI 0x20 /* Enable Transmission Finished Interrupt */
+#define ERFCI 0x40 /* Enable Receive FIFO Count Interrupt */
+
+/* UART_MCR Masks */
+#define XOFF 0x01 /* Transmitter Off */
+#define MRTS 0x02 /* Manual Request To Send */
+#define RFIT 0x04 /* Receive FIFO IRQ Threshold */
+#define RFRT 0x08 /* Receive FIFO RTS Threshold */
+#define LOOP_ENA 0x10 /* Loopback Mode Enable */
+#define FCPOL 0x20 /* Flow Control Pin Polarity */
+#define ARTS 0x40 /* Automatic Request To Send */
+#define ACTS 0x80 /* Automatic Clear To Send */
+
+/* UART_MSR Masks */
+#define SCTS 0x01 /* Sticky CTS */
+#define CTS 0x10 /* Clear To Send */
+#define RFCS 0x20 /* Receive FIFO Count Status */
+
+/* UART_GCTL Masks */
+#define UCEN 0x01 /* Enable UARTx Clocks */
+#define IREN 0x02 /* Enable IrDA Mode */
+#define TPOLC 0x04 /* IrDA TX Polarity Change */
+#define RPOLC 0x08 /* IrDA RX Polarity Change */
+#define FPE 0x10 /* Force Parity Error On Transmit */
+#define FFE 0x20 /* Force Framing Error On Transmit */
+
+#ifdef BFIN_UART_BF54X_STYLE
+# define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
+# define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
+# define OFFSET_GCTL 0x08 /* Global Control Register */
+# define OFFSET_LCR 0x0C /* Line Control Register */
+# define OFFSET_MCR 0x10 /* Modem Control Register */
+# define OFFSET_LSR 0x14 /* Line Status Register */
+# define OFFSET_MSR 0x18 /* Modem Status Register */
+# define OFFSET_SCR 0x1C /* SCR Scratch Register */
+# define OFFSET_IER_SET 0x20 /* Set Interrupt Enable Register */
+# define OFFSET_IER_CLEAR 0x24 /* Clear Interrupt Enable Register */
+# define OFFSET_THR 0x28 /* Transmit Holding register */
+# define OFFSET_RBR 0x2C /* Receive Buffer register */
+#else /* BF533 style */
+# define OFFSET_THR 0x00 /* Transmit Holding register */
+# define OFFSET_RBR 0x00 /* Receive Buffer register */
+# define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
+# define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
+# define OFFSET_IER 0x04 /* Interrupt Enable Register */
+# define OFFSET_IIR 0x08 /* Interrupt Identification Register */
+# define OFFSET_LCR 0x0C /* Line Control Register */
+# define OFFSET_MCR 0x10 /* Modem Control Register */
+# define OFFSET_LSR 0x14 /* Line Status Register */
+# define OFFSET_MSR 0x18 /* Modem Status Register */
+# define OFFSET_SCR 0x1C /* SCR Scratch Register */
+# define OFFSET_GCTL 0x24 /* Global Control Register */
+/* code should not need IIR, so force build error if they use it */
+# undef OFFSET_IIR
+#endif
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits. So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+struct bfin_uart_regs {
+#ifdef BFIN_UART_BF54X_STYLE
+ __BFP(dll);
+ __BFP(dlh);
+ __BFP(gctl);
+ __BFP(lcr);
+ __BFP(mcr);
+ __BFP(lsr);
+ __BFP(msr);
+ __BFP(scr);
+ __BFP(ier_set);
+ __BFP(ier_clear);
+ __BFP(thr);
+ __BFP(rbr);
+#else
+ union {
+ u16 dll;
+ u16 thr;
+ const u16 rbr;
+ };
+ const u16 __pad0;
+ union {
+ u16 dlh;
+ u16 ier;
+ };
+ const u16 __pad1;
+ const __BFP(iir);
+ __BFP(lcr);
+ __BFP(mcr);
+ __BFP(lsr);
+ __BFP(msr);
+ __BFP(scr);
+ const u32 __pad2;
+ __BFP(gctl);
+#endif
+};
+#undef __BFP
+
+#ifndef port_membase
+# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+#endif
+
+#define UART_GET_CHAR(p) bfin_read16(port_membase(p) + OFFSET_RBR)
+#define UART_GET_DLL(p) bfin_read16(port_membase(p) + OFFSET_DLL)
+#define UART_GET_DLH(p) bfin_read16(port_membase(p) + OFFSET_DLH)
+#define UART_GET_GCTL(p) bfin_read16(port_membase(p) + OFFSET_GCTL)
+#define UART_GET_LCR(p) bfin_read16(port_membase(p) + OFFSET_LCR)
+#define UART_GET_MCR(p) bfin_read16(port_membase(p) + OFFSET_MCR)
+#define UART_GET_MSR(p) bfin_read16(port_membase(p) + OFFSET_MSR)
+
+#define UART_PUT_CHAR(p, v) bfin_write16(port_membase(p) + OFFSET_THR, v)
+#define UART_PUT_DLL(p, v) bfin_write16(port_membase(p) + OFFSET_DLL, v)
+#define UART_PUT_DLH(p, v) bfin_write16(port_membase(p) + OFFSET_DLH, v)
+#define UART_PUT_GCTL(p, v) bfin_write16(port_membase(p) + OFFSET_GCTL, v)
+#define UART_PUT_LCR(p, v) bfin_write16(port_membase(p) + OFFSET_LCR, v)
+#define UART_PUT_MCR(p, v) bfin_write16(port_membase(p) + OFFSET_MCR, v)
+
+#ifdef BFIN_UART_BF54X_STYLE
+
+#define UART_CLEAR_IER(p, v) bfin_write16(port_membase(p) + OFFSET_IER_CLEAR, v)
+#define UART_GET_IER(p) bfin_read16(port_membase(p) + OFFSET_IER_SET)
+#define UART_SET_IER(p, v) bfin_write16(port_membase(p) + OFFSET_IER_SET, v)
+
+#define UART_CLEAR_DLAB(p) /* MMRs not muxed on BF54x */
+#define UART_SET_DLAB(p) /* MMRs not muxed on BF54x */
+
+#define UART_CLEAR_LSR(p) bfin_write16(port_membase(p) + OFFSET_LSR, -1)
+#define UART_GET_LSR(p) bfin_read16(port_membase(p) + OFFSET_LSR)
+#define UART_PUT_LSR(p, v) bfin_write16(port_membase(p) + OFFSET_LSR, v)
+
+/* This handles hard CTS/RTS */
+#define BFIN_UART_CTSRTS_HARD
+#define UART_CLEAR_SCTS(p) bfin_write16((port_membase(p) + OFFSET_MSR), SCTS)
+#define UART_GET_CTS(x) (UART_GET_MSR(x) & CTS)
+#define UART_DISABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS | MRTS))
+#define UART_ENABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
+#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
+#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
+
+#else /* BF533 style */
+
+#define UART_CLEAR_IER(p, v) UART_PUT_IER(p, UART_GET_IER(p) & ~(v))
+#define UART_GET_IER(p) bfin_read16(port_membase(p) + OFFSET_IER)
+#define UART_PUT_IER(p, v) bfin_write16(port_membase(p) + OFFSET_IER, v)
+#define UART_SET_IER(p, v) UART_PUT_IER(p, UART_GET_IER(p) | (v))
+
+#define UART_CLEAR_DLAB(p) do { UART_PUT_LCR(p, UART_GET_LCR(p) & ~DLAB); SSYNC(); } while (0)
+#define UART_SET_DLAB(p) do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
+
+#ifndef put_lsr_cache
+# define put_lsr_cache(p, v) (((struct bfin_serial_port *)(p))->lsr = (v))
+#endif
+#ifndef get_lsr_cache
+# define get_lsr_cache(p) (((struct bfin_serial_port *)(p))->lsr)
+#endif
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline void UART_CLEAR_LSR(void *p)
+{
+ put_lsr_cache(p, 0);
+ bfin_write16(port_membase(p) + OFFSET_LSR, -1);
+}
+static inline unsigned int UART_GET_LSR(void *p)
+{
+ unsigned int lsr = bfin_read16(port_membase(p) + OFFSET_LSR);
+ put_lsr_cache(p, get_lsr_cache(p) | (lsr & (BI|FE|PE|OE)));
+ return lsr | get_lsr_cache(p);
+}
+static inline void UART_PUT_LSR(void *p, uint16_t val)
+{
+ put_lsr_cache(p, get_lsr_cache(p) & ~val);
+}
+
+/* This handles soft CTS/RTS */
+#define UART_GET_CTS(x) gpio_get_value((x)->cts_pin)
+#define UART_DISABLE_RTS(x) gpio_set_value((x)->rts_pin, 1)
+#define UART_ENABLE_RTS(x) gpio_set_value((x)->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
+
+#endif
+
+#ifndef BFIN_UART_TX_FIFO_SIZE
+# define BFIN_UART_TX_FIFO_SIZE 2
+#endif
+
+#endif /* __BFIN_ASM_SERIAL_H__ */
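On the BF533-style parts the hardware LSR is clear-on-read, which is why UART_GET_LSR() above folds the error bits into a per-port software cache rather than letting a TX-side poll destroy them. A sketch of the TX path this protects (uart is assumed to be a valid, mapped struct bfin_serial_port):

    static void wait_for_xmit_room(struct bfin_serial_port *uart)
    {
            /* Polling is safe here: any BI/FE/PE/OE bits returned by this
             * read are parked in uart->lsr by UART_GET_LSR(), so the RX
             * path still sees them and can later drop them one at a time
             * with UART_PUT_LSR(uart, bit). */
            while (!(UART_GET_LSR(uart) & THRE))
                    cpu_relax();
    }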
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 3f7ef4d9779..29f4fd88617 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -108,7 +108,9 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
+#define test_bit __skip_test_bit
#include <asm-generic/bitops/non-atomic.h>
+#undef test_bit
#endif /* CONFIG_SMP */
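The hunk above relies on a small preprocessor trick: SMP Blackfin supplies its own atomic test_bit(), so the generic definition is renamed out of the way for the duration of the include and the rename is dropped straight after. The same pattern in isolation (all names here are illustrative):

    #define frob __generic_frob  /* divert the generic definition...               */
    #include "generic_ops.h"     /* ...its frob() now compiles as __generic_frob() */
    #undef frob                  /* restore the name; our own frob() stands        */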
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index bd0641a267f..568885a2c28 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -7,6 +7,8 @@
#ifndef __ARCH_BLACKFIN_CACHE_H
#define __ARCH_BLACKFIN_CACHE_H
+#include <linux/linkage.h> /* for asmlinkage */
+
/*
* Bytes per L1 cache line
* Blackfin loads 32 bytes for cache
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 2666ff8ea95..77135b62818 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -11,6 +11,9 @@
#include <asm/blackfin.h> /* for SSYNC() */
#include <asm/sections.h> /* for _ramend */
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index eedf3ca65ba..d9dbc1a5353 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -14,40 +14,7 @@
#include <asm/blackfin.h>
#include <asm/page.h>
#include <asm-generic/dma.h>
-
-/* DMA_CONFIG Masks */
-#define DMAEN 0x0001 /* DMA Channel Enable */
-#define WNR 0x0002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
-#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
-#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
-#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
-#define RESTART 0x0020 /* DMA Buffer Clear */
-#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
-#define DI_EN 0x0080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x0f00 /* Next Descriptor Size */
-#define DMAFLOW 0x7000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
-
-/* DMA_IRQ_STATUS Masks */
-#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
-#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
-#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
-#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
+#include <asm/bfin_dma.h>
/*-------------------------
* config reg bits value
@@ -149,7 +116,7 @@ void blackfin_dma_resume(void);
* DMA API's
*******************************************************************************/
extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
-extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS];
extern int channel2irq(unsigned int channel);
static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
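The dma_io_base_addr change above is only a qualifier tweak, but the placement of const matters. A declarations-only comparison (the real table carries an initializer, as the mach-bf518/dma.c hunk later in this patch shows):

    struct dma_register *tbl_a[4];          /* mutable table of mutable registers */
    struct dma_register * const tbl_b[4];   /* fixed table: entries cannot be
                                               reseated, so it may live in .rodata */
    const struct dma_register *tbl_c[4];    /* mutable table of read-only views */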
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index efcc3aebeae..3047120cfcf 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -9,6 +9,8 @@
#ifndef _BLACKFIN_DPMC_H_
#define _BLACKFIN_DPMC_H_
+#include <mach/pll.h>
+
/* PLL_CTL Masks */
#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
#define PLL_OFF 0x0002 /* PLL Not Powered */
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 234fbac17ec..dccae26805b 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,148 +7,48 @@
#ifndef _BFIN_IO_H
#define _BFIN_IO_H
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
#include <linux/compiler.h>
-
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory, including Zorro memory. They are meant to
- * access the bus in the bus byte order, which is little-endian.
- *
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the bfin architecture, we just read/write the
- * memory location directly.
- */
-#ifndef __ASSEMBLY__
-
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
- unsigned int val;
- int tmp;
-
- __asm__ __volatile__ (
- "cli %1;"
- "NOP; NOP; SSYNC;"
- "%0 = b [%2] (z);"
- "sti %1;"
- : "=d"(val), "=d"(tmp)
- : "a"(addr)
- );
-
- return (unsigned char) val;
-}
-
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
- unsigned int val;
- int tmp;
-
- __asm__ __volatile__ (
- "cli %1;"
- "NOP; NOP; SSYNC;"
- "%0 = w [%2] (z);"
- "sti %1;"
- : "=d"(val), "=d"(tmp)
- : "a"(addr)
- );
-
- return (unsigned short) val;
-}
-
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
- unsigned int val;
- int tmp;
-
- __asm__ __volatile__ (
- "cli %1;"
- "NOP; NOP; SSYNC;"
- "%0 = [%2];"
- "sti %1;"
- : "=d"(val), "=d"(tmp)
- : "a"(addr)
- );
-
- return val;
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define DECLARE_BFIN_RAW_READX(size, type, asm, asm_sign) \
+static inline type __raw_read##size(const volatile void __iomem *addr) \
+{ \
+ unsigned int val; \
+ int tmp; \
+ __asm__ __volatile__ ( \
+ "cli %1;" \
+ "NOP; NOP; SSYNC;" \
+ "%0 = "#asm" [%2] "#asm_sign";" \
+ "sti %1;" \
+ : "=d"(val), "=d"(tmp) \
+ : "a"(addr) \
+ ); \
+ return (type) val; \
}
-
-#endif /* __ASSEMBLY__ */
-
-#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define memset_io(a, b, c) memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-
-/* Convert "I/O port addresses" to actual addresses. i.e. ugly casts. */
-#define __io(port) ((void *)(unsigned long)(port))
-
-#define inb(port) readb(__io(port))
-#define inw(port) readw(__io(port))
-#define inl(port) readl(__io(port))
-#define outb(x, port) writeb(x, __io(port))
-#define outw(x, port) writew(x, __io(port))
-#define outl(x, port) writel(x, __io(port))
-
-#define inb_p(port) inb(__io(port))
-#define inw_p(port) inw(__io(port))
-#define inl_p(port) inl(__io(port))
-#define outb_p(x, port) outb(x, __io(port))
-#define outw_p(x, port) outw(x, __io(port))
-#define outl_p(x, port) outl(x, __io(port))
-
-#define ioread8_rep(a, d, c) readsb(a, d, c)
-#define ioread16_rep(a, d, c) readsw(a, d, c)
-#define ioread32_rep(a, d, c) readsl(a, d, c)
-#define iowrite8_rep(a, s, c) writesb(a, s, c)
-#define iowrite16_rep(a, s, c) writesw(a, s, c)
-#define iowrite32_rep(a, s, c) writesl(a, s, c)
-
-#define ioread8(x) readb(x)
-#define ioread16(x) readw(x)
-#define ioread32(x) readl(x)
-#define iowrite8(val, x) writeb(val, x)
-#define iowrite16(val, x) writew(val, x)
-#define iowrite32(val, x) writel(val, x)
-
-/**
- * I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes.
- */
-#define mmiowb() do { SSYNC(); wmb(); } while (0)
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_NOCACHE_SER 1
-
-#ifndef __ASSEMBLY__
+DECLARE_BFIN_RAW_READX(b, u8, b, (z))
+#define __raw_readb __raw_readb
+DECLARE_BFIN_RAW_READX(w, u16, w, (z))
+#define __raw_readw __raw_readw
+DECLARE_BFIN_RAW_READX(l, u32, , )
+#define __raw_readl __raw_readl
extern void outsb(unsigned long port, const void *addr, unsigned long count);
extern void outsw(unsigned long port, const void *addr, unsigned long count);
extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
extern void outsl(unsigned long port, const void *addr, unsigned long count);
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
extern void insb(unsigned long port, void *addr, unsigned long count);
extern void insw(unsigned long port, void *addr, unsigned long count);
extern void insw_8(unsigned long port, void *addr, unsigned long count);
extern void insl(unsigned long port, void *addr, unsigned long count);
extern void insl_16(unsigned long port, void *addr, unsigned long count);
+#define insb insb
+#define insw insw
+#define insl insl
extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
@@ -158,108 +58,14 @@ extern void dma_insb(unsigned long port, void *addr, unsigned short count);
extern void dma_insw(unsigned long port, void *addr, unsigned short count);
extern void dma_insl(unsigned long port, void *addr, unsigned short count);
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
- insl((unsigned long)addr, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
- insw((unsigned long)addr, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
- insb((unsigned long)addr, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
- outsl((unsigned long)addr, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
- outsw((unsigned long)addr, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
- outsb((unsigned long)addr, buf, len);
-}
-
-/*
- * Map some physical address range into the kernel address space.
- */
-static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
- int cacheflag)
-{
- return (void __iomem *)physaddr;
-}
-
-/*
- * Unmap a ioremap()ed region again
- */
-static inline void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free the current pointer/page tables anymore, but it
- * wasn't used anyway and might be added later.
- */
-static inline void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
+/**
+ * I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes.
*/
-static inline void kernel_set_cachemode(void *addr, unsigned long size,
- int cmode)
-{
-}
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
- unsigned long size)
-{
- return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
+#define mmiowb() do { SSYNC(); wmb(); } while (0)
-extern void blkfin_inv_cache_all(void);
+#include <asm-generic/io.h>
#endif
-
-#define ioport_map(port, nr) ((void __iomem*)(port))
-#define ioport_unmap(addr)
-
-/* Pages to physical address... */
-#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
-
-#define phys_to_virt(vaddr) ((void *) (vaddr))
-#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-#endif /* __KERNEL__ */
-
-#endif /* _BFIN_IO_H */
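DECLARE_BFIN_RAW_READX() above stamps out the three interrupt-safe MMR readers, and the self-referential define after each one (e.g. #define __raw_readb __raw_readb) is how asm-generic/io.h detects that the arch supplied its own version. Expanding DECLARE_BFIN_RAW_READX(b, u8, b, (z)) by hand gives, modulo whitespace:

    static inline u8 __raw_readb(const volatile void __iomem *addr)
    {
            unsigned int val;
            int tmp;
            __asm__ __volatile__ (
                    "cli %1;"               /* mask interrupts around the load */
                    "NOP; NOP; SSYNC;"      /* settle the pipeline first */
                    "%0 = b [%2] (z);"      /* zero-extending 8-bit read */
                    "sti %1;"
                    : "=d"(val), "=d"(tmp)
                    : "a"(addr)
            );
            return (u8) val;
    }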
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 41c4d70544e..3365cb97f53 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -13,9 +13,6 @@
#ifdef CONFIG_SMP
# include <asm/pda.h>
# include <asm/processor.h>
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
#else
extern unsigned long bfin_irq_flags;
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index aea880274de..8af7772e84c 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -14,7 +14,7 @@
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
#include <asm/ptrace.h>
-#include <asm/blackfin.h>
+#include <mach/blackfin.h>
static inline unsigned long rdusp(void)
{
@@ -134,6 +134,8 @@ static inline uint32_t __pure bfin_dspid(void)
return bfin_read_DSPID();
}
+#define blackfin_core_id() (bfin_dspid() & 0xff)
+
static inline uint32_t __pure bfin_compiled_revid(void)
{
#if defined(CONFIG_BF_REV_0_0)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1942ccfedbe..1f286e71c21 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void arch_read_lock_asm(volatile int *ptr);
-asmlinkage int arch_read_trylock_asm(volatile int *ptr);
-asmlinkage void arch_read_unlock_asm(volatile int *ptr);
-asmlinkage void arch_write_lock_asm(volatile int *ptr);
-asmlinkage int arch_write_trylock_asm(volatile int *ptr);
-asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
@@ -64,32 +64,36 @@ static inline int arch_write_can_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- arch_read_lock_asm(&rw->lock);
+ __raw_read_lock_asm(&rw->lock);
}
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock_asm(&rw->lock);
+ return __raw_read_trylock_asm(&rw->lock);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- arch_read_unlock_asm(&rw->lock);
+ __raw_read_unlock_asm(&rw->lock);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- arch_write_lock_asm(&rw->lock);
+ __raw_write_lock_asm(&rw->lock);
}
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock_asm(&rw->lock);
+ return __raw_write_trylock_asm(&rw->lock);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- arch_write_unlock_asm(&rw->lock);
+ __raw_write_unlock_asm(&rw->lock);
}
#define arch_spin_relax(lock) cpu_relax()
diff --git a/arch/blackfin/include/mach-common/pll.h b/arch/blackfin/include/mach-common/pll.h
new file mode 100644
index 00000000000..382178b361a
--- /dev/null
+++ b/arch/blackfin/include/mach-common/pll.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_COMMON_PLL_H
+#define _MACH_COMMON_PLL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/blackfin.h>
+#include <asm/irqflags.h>
+
+#ifndef bfin_iwr_restore
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
+{
+#ifdef SIC_IWR
+ bfin_write_SIC_IWR(iwr0);
+#else
+ bfin_write_SIC_IWR0(iwr0);
+# ifdef SIC_IWR1
+ bfin_write_SIC_IWR1(iwr1);
+# endif
+# ifdef SIC_IWR2
+ bfin_write_SIC_IWR2(iwr2);
+# endif
+#endif
+}
+#endif
+
+#ifndef bfin_iwr_save
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+ unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+#ifdef SIC_IWR
+ *iwr0 = bfin_read_SIC_IWR();
+#else
+ *iwr0 = bfin_read_SIC_IWR0();
+# ifdef SIC_IWR1
+ *iwr1 = bfin_read_SIC_IWR1();
+# endif
+# ifdef SIC_IWR2
+ *iwr2 = bfin_read_SIC_IWR2();
+# endif
+#endif
+ bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#endif
+
+static inline void _bfin_write_pll_relock(u32 addr, unsigned int val)
+{
+ unsigned long flags, iwr0, iwr1, iwr2;
+
+ if (val == bfin_read_PLL_CTL())
+ return;
+
+ flags = hard_local_irq_save();
+ /* Enable the PLL Wakeup bit in SIC IWR */
+ bfin_iwr_save(IWR_ENABLE(0), 0, 0, &iwr0, &iwr1, &iwr2);
+
+ bfin_write16(addr, val);
+ SSYNC();
+ asm("IDLE;");
+
+ bfin_iwr_restore(iwr0, iwr1, iwr2);
+ hard_local_irq_restore(flags);
+}
+
+/* Writing to PLL_CTL initiates a PLL relock sequence */
+static inline void bfin_write_PLL_CTL(unsigned int val)
+{
+ _bfin_write_pll_relock(PLL_CTL, val);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence */
+static inline void bfin_write_VR_CTL(unsigned int val)
+{
+ _bfin_write_pll_relock(VR_CTL, val);
+}
+
+#endif
+
+#endif
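The relock helper above exists because a PLL_CTL or VR_CTL write only takes effect once the core executes IDLE with the PLL-wakeup interrupt armed; bfin_iwr_save()/bfin_iwr_restore() bracket exactly that window. Callers simply use the accessor; a sketch that switches the PLL input to CLKIN/2 via the DF bit from asm/dpmc.h:

    static void pll_use_half_clkin(void)
    {
            /* A no-op if DF is already set: the helper compares against
             * the live PLL_CTL before starting a relock sequence. */
            bfin_write_PLL_CTL(bfin_read_PLL_CTL() | DF);
    }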
diff --git a/arch/blackfin/include/mach-common/ports-a.h b/arch/blackfin/include/mach-common/ports-a.h
new file mode 100644
index 00000000000..9f78a761c40
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-a.h
@@ -0,0 +1,25 @@
+/*
+ * Port A Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_A__
+#define __BFIN_PERIPHERAL_PORT_A__
+
+#define PA0 (1 << 0)
+#define PA1 (1 << 1)
+#define PA2 (1 << 2)
+#define PA3 (1 << 3)
+#define PA4 (1 << 4)
+#define PA5 (1 << 5)
+#define PA6 (1 << 6)
+#define PA7 (1 << 7)
+#define PA8 (1 << 8)
+#define PA9 (1 << 9)
+#define PA10 (1 << 10)
+#define PA11 (1 << 11)
+#define PA12 (1 << 12)
+#define PA13 (1 << 13)
+#define PA14 (1 << 14)
+#define PA15 (1 << 15)
+
+#endif
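This header (and the sibling port B..J headers that follow) is nothing but one-bit-per-pin masks, so multi-pin operations are plain ORs; a hypothetical three-LED bank, for instance:

    unsigned short leds = PA4 | PA5 | PA6;  /* == 0x0070: pins 4..6 of port A */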
diff --git a/arch/blackfin/include/mach-common/ports-b.h b/arch/blackfin/include/mach-common/ports-b.h
new file mode 100644
index 00000000000..b81702f09ec
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-b.h
@@ -0,0 +1,25 @@
+/*
+ * Port B Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_B__
+#define __BFIN_PERIPHERAL_PORT_B__
+
+#define PB0 (1 << 0)
+#define PB1 (1 << 1)
+#define PB2 (1 << 2)
+#define PB3 (1 << 3)
+#define PB4 (1 << 4)
+#define PB5 (1 << 5)
+#define PB6 (1 << 6)
+#define PB7 (1 << 7)
+#define PB8 (1 << 8)
+#define PB9 (1 << 9)
+#define PB10 (1 << 10)
+#define PB11 (1 << 11)
+#define PB12 (1 << 12)
+#define PB13 (1 << 13)
+#define PB14 (1 << 14)
+#define PB15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-c.h b/arch/blackfin/include/mach-common/ports-c.h
new file mode 100644
index 00000000000..3cc665e0ba0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-c.h
@@ -0,0 +1,25 @@
+/*
+ * Port C Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_C__
+#define __BFIN_PERIPHERAL_PORT_C__
+
+#define PC0 (1 << 0)
+#define PC1 (1 << 1)
+#define PC2 (1 << 2)
+#define PC3 (1 << 3)
+#define PC4 (1 << 4)
+#define PC5 (1 << 5)
+#define PC6 (1 << 6)
+#define PC7 (1 << 7)
+#define PC8 (1 << 8)
+#define PC9 (1 << 9)
+#define PC10 (1 << 10)
+#define PC11 (1 << 11)
+#define PC12 (1 << 12)
+#define PC13 (1 << 13)
+#define PC14 (1 << 14)
+#define PC15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-d.h b/arch/blackfin/include/mach-common/ports-d.h
new file mode 100644
index 00000000000..868c6a01f1b
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-d.h
@@ -0,0 +1,25 @@
+/*
+ * Port D Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_D__
+#define __BFIN_PERIPHERAL_PORT_D__
+
+#define PD0 (1 << 0)
+#define PD1 (1 << 1)
+#define PD2 (1 << 2)
+#define PD3 (1 << 3)
+#define PD4 (1 << 4)
+#define PD5 (1 << 5)
+#define PD6 (1 << 6)
+#define PD7 (1 << 7)
+#define PD8 (1 << 8)
+#define PD9 (1 << 9)
+#define PD10 (1 << 10)
+#define PD11 (1 << 11)
+#define PD12 (1 << 12)
+#define PD13 (1 << 13)
+#define PD14 (1 << 14)
+#define PD15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-e.h b/arch/blackfin/include/mach-common/ports-e.h
new file mode 100644
index 00000000000..c88b0d0dd44
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-e.h
@@ -0,0 +1,25 @@
+/*
+ * Port E Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_E__
+#define __BFIN_PERIPHERAL_PORT_E__
+
+#define PE0 (1 << 0)
+#define PE1 (1 << 1)
+#define PE2 (1 << 2)
+#define PE3 (1 << 3)
+#define PE4 (1 << 4)
+#define PE5 (1 << 5)
+#define PE6 (1 << 6)
+#define PE7 (1 << 7)
+#define PE8 (1 << 8)
+#define PE9 (1 << 9)
+#define PE10 (1 << 10)
+#define PE11 (1 << 11)
+#define PE12 (1 << 12)
+#define PE13 (1 << 13)
+#define PE14 (1 << 14)
+#define PE15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-f.h b/arch/blackfin/include/mach-common/ports-f.h
new file mode 100644
index 00000000000..d6af2063327
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-f.h
@@ -0,0 +1,25 @@
+/*
+ * Port F Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_F__
+#define __BFIN_PERIPHERAL_PORT_F__
+
+#define PF0 (1 << 0)
+#define PF1 (1 << 1)
+#define PF2 (1 << 2)
+#define PF3 (1 << 3)
+#define PF4 (1 << 4)
+#define PF5 (1 << 5)
+#define PF6 (1 << 6)
+#define PF7 (1 << 7)
+#define PF8 (1 << 8)
+#define PF9 (1 << 9)
+#define PF10 (1 << 10)
+#define PF11 (1 << 11)
+#define PF12 (1 << 12)
+#define PF13 (1 << 13)
+#define PF14 (1 << 14)
+#define PF15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-g.h b/arch/blackfin/include/mach-common/ports-g.h
new file mode 100644
index 00000000000..09355d333c0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-g.h
@@ -0,0 +1,25 @@
+/*
+ * Port G Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_G__
+#define __BFIN_PERIPHERAL_PORT_G__
+
+#define PG0 (1 << 0)
+#define PG1 (1 << 1)
+#define PG2 (1 << 2)
+#define PG3 (1 << 3)
+#define PG4 (1 << 4)
+#define PG5 (1 << 5)
+#define PG6 (1 << 6)
+#define PG7 (1 << 7)
+#define PG8 (1 << 8)
+#define PG9 (1 << 9)
+#define PG10 (1 << 10)
+#define PG11 (1 << 11)
+#define PG12 (1 << 12)
+#define PG13 (1 << 13)
+#define PG14 (1 << 14)
+#define PG15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-h.h b/arch/blackfin/include/mach-common/ports-h.h
new file mode 100644
index 00000000000..fa3910c6fbd
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-h.h
@@ -0,0 +1,25 @@
+/*
+ * Port H Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_H__
+#define __BFIN_PERIPHERAL_PORT_H__
+
+#define PH0 (1 << 0)
+#define PH1 (1 << 1)
+#define PH2 (1 << 2)
+#define PH3 (1 << 3)
+#define PH4 (1 << 4)
+#define PH5 (1 << 5)
+#define PH6 (1 << 6)
+#define PH7 (1 << 7)
+#define PH8 (1 << 8)
+#define PH9 (1 << 9)
+#define PH10 (1 << 10)
+#define PH11 (1 << 11)
+#define PH12 (1 << 12)
+#define PH13 (1 << 13)
+#define PH14 (1 << 14)
+#define PH15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-i.h b/arch/blackfin/include/mach-common/ports-i.h
new file mode 100644
index 00000000000..f176f08af62
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-i.h
@@ -0,0 +1,25 @@
+/*
+ * Port I Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_I__
+#define __BFIN_PERIPHERAL_PORT_I__
+
+#define PI0 (1 << 0)
+#define PI1 (1 << 1)
+#define PI2 (1 << 2)
+#define PI3 (1 << 3)
+#define PI4 (1 << 4)
+#define PI5 (1 << 5)
+#define PI6 (1 << 6)
+#define PI7 (1 << 7)
+#define PI8 (1 << 8)
+#define PI9 (1 << 9)
+#define PI10 (1 << 10)
+#define PI11 (1 << 11)
+#define PI12 (1 << 12)
+#define PI13 (1 << 13)
+#define PI14 (1 << 14)
+#define PI15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-j.h b/arch/blackfin/include/mach-common/ports-j.h
new file mode 100644
index 00000000000..924123ecec5
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-j.h
@@ -0,0 +1,25 @@
+/*
+ * Port J Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_J__
+#define __BFIN_PERIPHERAL_PORT_J__
+
+#define PJ0 (1 << 0)
+#define PJ1 (1 << 1)
+#define PJ2 (1 << 2)
+#define PJ3 (1 << 3)
+#define PJ4 (1 << 4)
+#define PJ5 (1 << 5)
+#define PJ6 (1 << 6)
+#define PJ7 (1 << 7)
+#define PJ8 (1 << 8)
+#define PJ9 (1 << 9)
+#define PJ10 (1 << 10)
+#define PJ11 (1 << 11)
+#define PJ12 (1 << 12)
+#define PJ13 (1 << 13)
+#define PJ14 (1 << 14)
+#define PJ15 (1 << 15)
+
+#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index bfe75af4e8b..886e00014d7 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -116,7 +116,7 @@ void __init generate_cplb_tables_all(void)
((_ramend - uncached_end) >= 1 * 1024 * 1024))
dcplb_bounds[i_d].eaddr = uncached_end;
else
- dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
+ dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
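The one-liner above fixes an off-by-one in mask construction: rounding an address down to a 1 MiB boundary must clear all offset bits, which is addr & ~(SIZE - 1), not addr & ~SIZE (that clears only bit 20). Worked through with an assumed uncached_end of 0x03F81234:

    0x03F81234 & ~0x00100000        /* == 0x03E81234: bit 20 dropped, not aligned */
    0x03F81234 & ~(0x00100000 - 1)  /* == 0x03F00000: properly 1 MiB aligned */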
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index edae461b1c5..eb92592fd80 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -345,6 +345,23 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
}
#endif
+#ifdef CONFIG_IPIPE
+static unsigned long kgdb_arch_imask;
+#endif
+
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+ if (kgdb_single_step)
+ preempt_enable();
+
+#ifdef CONFIG_IPIPE
+ if (kgdb_arch_imask) {
+ cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
+ kgdb_arch_imask = 0;
+ }
+#endif
+}
+
int kgdb_arch_handle_exception(int vector, int signo,
int err_code, char *remcom_in_buffer,
char *remcom_out_buffer,
@@ -388,6 +405,12 @@ int kgdb_arch_handle_exception(int vector, int signo,
* kgdb_single_step > 0 means in single step mode
*/
kgdb_single_step = i + 1;
+
+ preempt_disable();
+#ifdef CONFIG_IPIPE
+ kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
+ cpu_pda[raw_smp_processor_id()].ex_imask = 0;
+#endif
}
bfin_correct_hw_break();
@@ -448,6 +471,9 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
int kgdb_arch_init(void)
{
kgdb_single_step = 0;
+#ifdef CONFIG_IPIPE
+ kgdb_arch_imask = 0;
+#endif
bfin_remove_all_hw_break();
return 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 08c0236acf3..2a6e9dbb62a 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -95,6 +95,10 @@ static int __init kgdbtest_init(void)
{
struct proc_dir_entry *entry;
+#if L2_LENGTH
+ num2 = 0;
+#endif
+
entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
if (entry == NULL)
return -ENOMEM;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index b894c8abe7e..c0ccadcfa44 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -104,24 +104,23 @@ static const unsigned short bfin_mac_peripherals[] = {
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
- .addr = 1,
- .irq = IRQ_MAC_PHYINT,
- },
- {
- .addr = 2,
- .irq = IRQ_MAC_PHYINT,
- },
- {
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
.addr = 3,
+#else
+ .addr = 1,
+#endif
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
- .phydev_number = 3,
+ .phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+	.phy_mask = 0xfff7, /* Only probe the port PHY connected to the on-chip MAC */
+#endif
};
static struct platform_device bfin_mii_bus = {
@@ -453,7 +452,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -496,7 +495,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -636,9 +635,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -670,9 +669,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
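The new phy_mask above works because mdiobus probing skips every address whose bit is set in the mask, so clearing only bit 3 restricts the scan to the switch port that faces the on-chip MAC:

    ~(1 << 3) & 0xffff      /* == 0xfff7: probe PHY address 3, skip the rest */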
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index e6ce1d7c523..50fc5c89e37 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -377,7 +377,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -420,7 +420,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -547,9 +547,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -581,9 +581,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf518/dma.c b/arch/blackfin/mach-bf518/dma.c
index 78b43605a0b..bcd1fbc8c54 100644
--- a/arch/blackfin/mach-bf518/dma.c
+++ b/arch/blackfin/mach-bf518/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..00c603fe821
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 2
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
index 970d310021e..f6d924ac0c4 100644
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
# define CONFIG_SERIAL_BFIN_CTSRTS
@@ -54,50 +27,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- struct timer_list cts_timer;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -146,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 9053462be4b..a8828863226 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -1,61 +1,43 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _MACH_BLACKFIN_H_
#define _MACH_BLACKFIN_H_
#include "bf518.h"
-#include "defBF512.h"
#include "anomaly.h"
-#if defined(CONFIG_BF518)
-#include "defBF518.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF512
+# include "defBF512.h"
#endif
-
-#if defined(CONFIG_BF516)
-#include "defBF516.h"
-#endif
-
-#if defined(CONFIG_BF514)
-#include "defBF514.h"
+#ifdef CONFIG_BF514
+# include "defBF514.h"
#endif
-
-#if defined(CONFIG_BF512)
-#include "defBF512.h"
+#ifdef CONFIG_BF516
+# include "defBF516.h"
#endif
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF512.h"
-
-#if defined(CONFIG_BF518)
-#include "cdefBF518.h"
+#ifdef CONFIG_BF518
+# include "defBF518.h"
#endif
-#if defined(CONFIG_BF516)
-#include "cdefBF516.h"
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF512
+# include "cdefBF512.h"
+# endif
+# ifdef CONFIG_BF514
+# include "cdefBF514.h"
+# endif
+# ifdef CONFIG_BF516
+# include "cdefBF516.h"
+# endif
+# ifdef CONFIG_BF518
+# include "cdefBF518.h"
+# endif
#endif
-#if defined(CONFIG_BF514)
-#include "cdefBF514.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS 2
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
-
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index 493020d0a65..b657d37a340 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,15 +7,1037 @@
#ifndef _CDEF_BF512_H
#define _CDEF_BF512_H
-/* include all Core registers and bit definitions */
-#include "defBF512.h"
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID() bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define bfin_read_SWRST() bfin_read16(SWRST)
+#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
+#define bfin_read_SYSCR() bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
+#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
+
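A watchdog sketch built on the accessors above; per the BF5xx hardware reference, the counter loads from WDOG_CNT and any write to WDOG_STAT reloads it, while the WDOG_CTL enable encoding (assumed here as WDEN) comes from the matching def* headers:

    bfin_write_WDOG_CNT(timeout_sclks); /* reload value, in SCLK cycles */
    bfin_write_WDOG_CTL(WDEN);          /* start counting (assumed encoding) */
    /* ... main loop ... */
    bfin_write_WDOG_STAT(0);            /* any write reloads from WDOG_CNT */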
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
+#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
+
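DLL and DLH overlay THR/RBR and IER at the same MMR addresses; on this UART model the divisor latch is selected with the DLAB bit of LCR, assumed here to be defined as in the other BF5xx def* headers. A baud-rate sketch for a precomputed 16-bit divisor:

    bfin_write_UART0_LCR(bfin_read_UART0_LCR() | DLAB);  /* expose DLL/DLH */
    bfin_write_UART0_DLL(divisor & 0xff);
    bfin_write_UART0_DLH(divisor >> 8);
    bfin_write_UART0_LCR(bfin_read_UART0_LCR() & ~DLAB); /* back to THR/RBR */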
+
+/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
+#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
+
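All eight general-purpose timers share the ENABLE/DISABLE/STATUS registers, and TIMER_ENABLE/TIMER_DISABLE act only on written 1-bits, so no read-modify-write is needed to start a single timer. A PWM sketch, with the CONFIG mode masks (PWM_OUT, PERIOD_CNT) and the TIMEN0 enable bit assumed from the matching def* headers:

    bfin_write_TIMER0_CONFIG(PWM_OUT | PERIOD_CNT); /* assumed mode masks */
    bfin_write_TIMER0_PERIOD(1000);                 /* period, in SCLK cycles */
    bfin_write_TIMER0_WIDTH(500);                   /* ~50% duty */
    bfin_write_TIMER_ENABLE(TIMEN0);                /* write-1-to-set: starts timer 0 only */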
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
+#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
+
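The CLEAR/SET/TOGGLE registers are write-one-to-act views of the port data register, so single pins can be driven without read-modify-write; only the DIR/INEN setup below needs it. A sketch driving PF2 high, assuming the pin is already in GPIO mode (its PORTF_FER bit cleared):

    bfin_write_PORTFIO_INEN(bfin_read_PORTFIO_INEN() & ~(1 << 2)); /* input buffer off */
    bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | (1 << 2));    /* output */
    bfin_write_PORTFIO_SET(1 << 2);                                /* write-1-to-set: PF2 high */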
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers */
+#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller */
+#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
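The MDMA_D0/S0 pair implements memory-to-memory transfers: the source stream reads, and the destination stream (WNR set) writes. A polled 16-bit copy sketch, with the CONFIG and status masks (DMAEN, WNR, WDSIZE_16, DI_EN, DMA_DONE) assumed from the matching def* headers:

    bfin_write_MDMA_S0_START_ADDR(src);
    bfin_write_MDMA_S0_X_COUNT(nwords);
    bfin_write_MDMA_S0_X_MODIFY(2);                 /* 16-bit stride */
    bfin_write_MDMA_D0_START_ADDR(dst);
    bfin_write_MDMA_D0_X_COUNT(nwords);
    bfin_write_MDMA_D0_X_MODIFY(2);
    bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
    bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16 | DI_EN);
    while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
        continue;
    bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);        /* status bits are W1C */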
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
+#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
+
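bfin_clear_PPI_STATUS() writes all-ones because the PPI status bits, like the DMA IRQ_STATUS bits above, are write-1-to-clear on these parts: a single 0xFFFF store acknowledges every pending condition at once.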
+
+/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
+#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
+#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
+#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
+
+/* Omit the CAN register sets from cdefBF534.h (CAN is not present on ADSP-BF51x processors) */
+
+/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
+#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
+
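PORTx_FER selects between GPIO (bit clear) and peripheral (bit set) operation per pin, while PORT_MUX arbitrates among peripherals that share the same pins. A sketch handing PG0/PG1 to a peripheral; the PORT_MUX field value is part-specific, so PG_MUX_VAL below is a hypothetical placeholder for the value given in the hardware reference:

    bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PG_MUX_VAL);             /* hypothetical field value */
    bfin_write_PORTG_FER(bfin_read_PORTG_FER() | (1 << 0) | (1 << 1));  /* peripheral mode */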
+
+/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
+#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
#endif /* _CDEF_BF512_H */
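
The accessors added above are thin wrappers that pair each memory-mapped register with a fixed-width bfin_read16()/bfin_write16() (or bfin_read32()/bfin_write32()) call. As an illustrative sketch only, not part of this commit: programming the UART1 baud divisor through these macros might look like the following, where DLAB is assumed to be the usual divisor-latch-access bit from the matching defBF512.h definitions, and the helper name and divisor value are hypothetical.

/* Hypothetical usage sketch: write a 16-bit UART1 divisor via the
 * DLL/DLH latch registers using the accessors defined above.  DLAB
 * is assumed to come from defBF512.h; it is not defined in this hunk.
 */
static void example_uart1_set_divisor(unsigned short divisor)
{
	unsigned short lcr = bfin_read_UART1_LCR();

	bfin_write_UART1_LCR(lcr | DLAB);	/* expose DLL/DLH */
	bfin_write_UART1_DLL(divisor & 0xff);	/* divisor low byte */
	bfin_write_UART1_DLH(divisor >> 8);	/* divisor high byte */
	bfin_write_UART1_LCR(lcr & ~DLAB);	/* restore data register access */
}
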
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index 108fa4bde27..dc988668203 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,9 +7,6 @@
#ifndef _CDEF_BF514_H
#define _CDEF_BF514_H
-/* include all Core registers and bit definitions */
-#include "defBF514.h"
-
/* BF514 is BF512 + RSI */
#include "cdefBF512.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 2751592ef1c..142e45cbc25 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,9 +7,6 @@
#ifndef _CDEF_BF516_H
#define _CDEF_BF516_H
-/* include all Core registers and bit definitions */
-#include "defBF516.h"
-
/* BF516 is BF514 + EMAC */
#include "cdefBF514.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index 7fb7f0eab99..e638197bf8b 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,9 +7,6 @@
#ifndef _CDEF_BF518_H
#define _CDEF_BF518_H
-/* include all Core registers and bit definitions */
-#include "defBF518.h"
-
/* BF518 is BF516 + IEEE-1588 */
#include "cdefBF516.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
deleted file mode 100644
index e16969f24ff..00000000000
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+++ /dev/null
@@ -1,1061 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF51x_base.h"
-
-/* Include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
-#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID() bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define bfin_read_SWRST() bfin_read16(SWRST)
-#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
-#define bfin_read_SYSCR() bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
-#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
-#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
-
-
-/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
-#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
-#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers */
-#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT, val)
-
-/* DMA Controller */
-#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
-#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
-
-
-/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
-#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
-#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
-#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
-
-/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF51x processor) */
-
-/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
-#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
-#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-
-#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
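All of the accessors removed above are thin wrappers pairing each MMR with bfin_read16/bfin_write16 (or the 32-bit variants for pointer and 32-bit count registers) at the register's width; equivalent definitions are consolidated elsewhere in this patch. A minimal usage sketch, assuming PG0 is the pin of interest (the 0x0001 mask and the helper name are illustrative, not part of the patch):

	static void pg0_pulse(void)
	{
		/* Configure PG0 as a GPIO output. */
		bfin_write_PORTGIO_DIR(bfin_read_PORTGIO_DIR() | 0x0001);
		/* SET/CLEAR act only on written 1-bits, so no
		 * read-modify-write of the data register is needed. */
		bfin_write_PORTGIO_SET(0x0001);
		bfin_write_PORTGIO_CLEAR(0x0001);
	}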
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index 9b505bb0cb2..27285823fb2 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,12 +7,1388 @@
#ifndef _DEF_BF512_H
#define _DEF_BF512_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x */
+/* ************************************************************** */
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define PLL_CTL 0xFFC00000 /* PLL Control Register */
+#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
+#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
+#define PLL_STAT 0xFFC0000C /* PLL Status Register */
+#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
+#define CHIPID 0xFFC00014 /* Device ID Register */
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define SWRST 0xFFC00100 /* Software Reset Register */
+#define SYSCR 0xFFC00104 /* System Configuration Register */
+#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
+
+#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
+#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
+#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
+#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
+#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
+#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
+#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 */
+#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
+#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
+#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
+#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
+#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
+#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
+
+
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
+#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
+#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
+
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define RTC_STAT 0xFFC00300 /* RTC Status Register */
+#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
+#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
+#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
+#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
+#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
+#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
+
+
+/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
+#define UART0_THR 0xFFC00400 /* Transmit Holding register */
+#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
+#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
+#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
+#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
+#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
+#define UART0_LCR 0xFFC0040C /* Line Control Register */
+#define UART0_MCR 0xFFC00410 /* Modem Control Register */
+#define UART0_LSR 0xFFC00414 /* Line Status Register */
+#define UART0_MSR 0xFFC00418 /* Modem Status Register */
+#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
+#define UART0_GCTL 0xFFC00424 /* Global Control Register */
+
+/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
+#define SPI0_REGBASE 0xFFC00500
+#define SPI0_CTL 0xFFC00500 /* SPI Control Register */
+#define SPI0_FLG 0xFFC00504 /* SPI Flag register */
+#define SPI0_STAT 0xFFC00508 /* SPI Status register */
+#define SPI0_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
+#define SPI0_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
+#define SPI0_BAUD 0xFFC00514 /* SPI Baud rate Register */
+#define SPI0_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
+
+/* SPI1 Controller (0xFFC03400 - 0xFFC034FF) */
+#define SPI1_REGBASE 0xFFC03400
+#define SPI1_CTL 0xFFC03400 /* SPI Control Register */
+#define SPI1_FLG 0xFFC03404 /* SPI Flag register */
+#define SPI1_STAT 0xFFC03408 /* SPI Status register */
+#define SPI1_TDBR 0xFFC0340C /* SPI Transmit Data Buffer Register */
+#define SPI1_RDBR 0xFFC03410 /* SPI Receive Data Buffer Register */
+#define SPI1_BAUD 0xFFC03414 /* SPI Baud rate Register */
+#define SPI1_SHADOW 0xFFC03418 /* SPI_RDBR Shadow Register */
+
+/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
+#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
+#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
+#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
+#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
+
+#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
+#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
+#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
+#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
+
+#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
+#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
+#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
+#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
+
+#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
+#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
+#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
+#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
+
+#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
+#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
+#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
+#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
+
+#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
+#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
+#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
+#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
+
+#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
+#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
+#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
+#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
+
+#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
+#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
+#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
+#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
+
+#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
+#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
+#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
+#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
+#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
+#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
+#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
+#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
+#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
+#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
+#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
+#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
+#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
+#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
+#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
+#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
+#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
+#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
+#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
+#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
+#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
+#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
+#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
+#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
+#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
+#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
+#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
+#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
+#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
+#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
+#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
+#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
+#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
+#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
+#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
+#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
+#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
+#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
+#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
+#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
+#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
+#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
+#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
+#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
+#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
+#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
+#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
+#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
+#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
+#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
+#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
+#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
+#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
+#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
+
+/* DMA Traffic Control Registers */
+#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
+#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
+#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
+#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
+#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
+#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
+#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
+#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
+#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
+#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
+#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
+#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
+#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
+#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
+
+#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
+#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
+#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
+#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
+#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
+#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
+#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
+#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
+#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
+#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
+#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
+#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
+#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
+
+#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
+#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
+#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
+#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
+#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
+#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
+#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
+#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
+#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
+#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
+#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
+#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
+#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
+
+#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
+#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
+#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
+#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
+#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
+#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
+#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
+#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
+#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
+#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
+#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
+#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
+#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
+
+#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
+#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
+#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
+#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
+#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
+#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
+#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
+#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
+#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
+#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
+#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
+#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
+#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
+
+#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
+#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
+#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
+#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
+#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
+#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
+#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
+#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
+#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
+#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
+#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
+#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
+#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
+
+#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
+#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
+#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
+#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
+#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
+#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
+#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
+#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
+#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
+#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
+#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
+#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
+#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
+
+#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
+#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
+#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
+#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
+#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
+#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
+#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
+#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
+#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
+#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
+#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
+#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
+#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
+
+#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
+#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
+#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
+#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
+#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
+#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
+#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
+#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
+#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
+#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
+#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
+#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
+#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
+
+#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
+#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
+#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
+#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
+#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
+#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
+#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
+#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
+#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
+#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
+#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
+#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
+#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
+
+#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
+#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
+#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
+#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
+#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
+#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
+#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
+#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
+#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
+#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
+#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
+#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
+#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
+
+#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
+#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
+#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
+#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
+#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
+#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
+#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
+#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
+#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
+#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
+#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
+#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
+#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
+
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
+#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
+#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
+#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
+#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
+#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
+#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
+#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
+#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
+
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
+#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
+#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
+#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
+#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
+#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
+#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
+#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
+#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
+
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
+#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
+#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
+#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
+#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
+#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
+#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
+#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
+#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
+
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
+#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
+#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
+#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
+#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
+#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
+#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
+#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
+#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
+#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
+#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
+#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
+#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
+#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
+
+
+/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
+#define TWI0_REGBASE 0xFFC01400
+#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
+#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
+#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
+#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
+#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
+#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
+#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
+#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
+#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
+#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
+#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
+#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
+#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
+#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
+#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
+#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
+#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
+#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
+#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
+#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
+#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
+#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
+#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
+#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
+#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
+#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
+#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
+#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
+#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
+#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
+#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
+#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
+#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
+#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
+#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
+#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
+#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
+#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
+#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
+#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
+#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
+#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
+#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
+#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
+#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
+#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
+#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
+#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
+#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
+#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
+
+
+/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
+#define UART1_THR 0xFFC02000 /* Transmit Holding register */
+#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
+#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
+#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
+#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
+#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
+#define UART1_LCR 0xFFC0200C /* Line Control Register */
+#define UART1_MCR 0xFFC02010 /* Modem Control Register */
+#define UART1_LSR 0xFFC02014 /* Line Status Register */
+#define UART1_MSR 0xFFC02018 /* Modem Status Register */
+#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
+#define UART1_GCTL 0xFFC02024 /* Global Control Register */
+
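UART1_THR, UART1_RBR, and UART1_DLL all decode to 0xFFC02000 (and UART1_IER shares 0xFFC02004 with UART1_DLH) because the Blackfin UART follows the 16550 convention: the divisor latch is visible only while the DLAB bit of LCR is set. A hedged sketch of the usual divisor-programming sequence, assuming the standard DLAB position (LCR bit 7) and the accessor macros shown earlier:

	static void uart1_set_divisor(unsigned short divisor)
	{
		/* Expose the divisor latch (DLAB = 1). */
		bfin_write_UART1_LCR(bfin_read_UART1_LCR() | 0x80);
		bfin_write_UART1_DLL(divisor & 0xFF);        /* low byte  */
		bfin_write_UART1_DLH((divisor >> 8) & 0xFF); /* high byte */
		/* Restore access to THR/RBR/IER (DLAB = 0). */
		bfin_write_UART1_LCR(bfin_read_UART1_LCR() & ~0x80);
	}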
+
+/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
+#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
+#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
+#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
+#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
+
+
+/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
+#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
+#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
+#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
+#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
+#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
+#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
+#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
+
+#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
+#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
+#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
+#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
+#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
+#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
+#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
+
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX 0xFFC03210 /* Port F mux control */
+#define PORTG_MUX 0xFFC03214 /* Port G mux control */
+#define PORTH_MUX 0xFFC03218 /* Port H mux control */
+#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
+#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
+#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
+#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
+#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
+#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
+#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
+#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer: All macros are intended to make C and Assembly code more readable.
+** Use these macros carefully, as any that do left shifts for field
+** depositing will result in the lower order bits being destroyed. Any
+** macro that shifts left to properly position the bit-field should be
+** used as part of an OR to initialize a register and NOT as a dynamic
+** modifier UNLESS the lower order bits are saved and ORed back in when
+** the macro is used.
+*************************************************************************************/
+
+/* CHIPID Masks */
+#define CHIPID_VERSION 0xF0000000
+#define CHIPID_FAMILY 0x0FFFF000
+#define CHIPID_MANUFACTURE 0x00000FFE
+
+/* SWRST Masks */
+#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
+#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
+#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
+#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
+#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
+
+/* SYSCR Masks */
+#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
+#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
+
+
+/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
+
+#if 0
+#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
+
+#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
+#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
+#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
+#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
+#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
+
+#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
+#define IRQ_TWI 0x00000200 /* TWI Interrupt */
+#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
+#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
+#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
+#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
+#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
+#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
+
+#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
+#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
+#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
+#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
+#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
+#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
+#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
+#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
+#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
+#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
+
+#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
+#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
+#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
+#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
+#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
+#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
+#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
+#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
+#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
+#endif
+
+/* SIC_IAR0 Macros */
+#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
+#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
+#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
+#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
+#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
+#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
+#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
+#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */
+
+/* SIC_IAR1 Macros */
+#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
+#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
+#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
+#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
+#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
+#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
+#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
+#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */
+
+/* SIC_IAR2 Macros */
+#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
+#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
+#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
+#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
+#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
+#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
+#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
+#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */
+
+/* SIC_IAR3 Macros */
+#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
+#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
+#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
+#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
+#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
+#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
+#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
+#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */
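+
+/* Illustrative sketch (assumes SIC_IAR0 is defined earlier in this file):
+** each assignment register packs eight 4-bit IVG selections, so a full
+** routing is built by ORing the positioned fields into one 32-bit write.
+*/
+#if 0
+*(volatile unsigned long *)SIC_IAR0 =
+	P0_IVG(7) | P1_IVG(7) | P2_IVG(8)  | P3_IVG(8) |
+	P4_IVG(9) | P5_IVG(9) | P6_IVG(10) | P7_IVG(10);
+#endif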
+
+
+/* SIC_IMASK Masks */
+#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
+#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
+#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
+#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
+
+/* SIC_IWR Masks */
+#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
+#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
+#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
+#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
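+
+/* Illustrative sketch (assumes SIC_IMASK0 and SIC_IWR0 are defined
+** earlier in this file): setting the mask bit for peripheral #10 with an
+** OR, and clearing its wakeup bit with an AND against the negated form.
+*/
+#if 0
+*(volatile unsigned long *)SIC_IMASK0 |= SIC_MASK(10);
+*(volatile unsigned long *)SIC_IWR0 &= IWR_DISABLE(10);
+#endif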
+
+/* **************** GENERAL PURPOSE TIMER MASKS **********************/
+/* TIMER_ENABLE Masks */
+#define TIMEN0 0x0001 /* Enable Timer 0 */
+#define TIMEN1 0x0002 /* Enable Timer 1 */
+#define TIMEN2 0x0004 /* Enable Timer 2 */
+#define TIMEN3 0x0008 /* Enable Timer 3 */
+#define TIMEN4 0x0010 /* Enable Timer 4 */
+#define TIMEN5 0x0020 /* Enable Timer 5 */
+#define TIMEN6 0x0040 /* Enable Timer 6 */
+#define TIMEN7 0x0080 /* Enable Timer 7 */
+
+/* TIMER_DISABLE Masks */
+#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
+#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
+#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
+#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
+#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
+#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
+#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
+#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
+
+/* TIMER_STATUS Masks */
+#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
+#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
+#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
+#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
+#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
+#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
+#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
+#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
+#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
+#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
+#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
+#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
+#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
+#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
+#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
+#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
+#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
+#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
+#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
+#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
+#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
+#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
+#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
+#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks */
+#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
+#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
+#define EXT_CLK 0x0003 /* External Clock Mode */
+#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
+#define PERIOD_CNT 0x0008 /* Period Count */
+#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
+#define TIN_SEL 0x0020 /* Timer Input Select */
+#define OUT_DIS 0x0040 /* Output Pad Disable */
+#define CLK_SEL 0x0080 /* Timer Clock Select */
+#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
+#define EMU_RUN 0x0200 /* Emulation Behavior Select */
+#define ERR_TYP 0xC000 /* Error Type */
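+
+/* Illustrative sketch (assumes the TIMER_ENABLE and TIMER_STATUS
+** register addresses are defined earlier in this file): starting timer 0
+** and acknowledging its latched interrupt, which on this family is
+** cleared by writing the bit back (W1C).
+*/
+#if 0
+*(volatile unsigned short *)TIMER_ENABLE = TIMEN0;
+if (*(volatile unsigned long *)TIMER_STATUS & TIMIL0)
+	*(volatile unsigned long *)TIMER_STATUS = TIMIL0; /* W1C acknowledge */
+#endif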
+
+/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
+/* EBIU_AMGCTL Masks */
+#define AMCKEN 0x0001 /* Enable CLKOUT */
+#define AMBEN_NONE 0x0000 /* All Banks Disabled */
+#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
+#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
+#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
+#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
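+
+/* Illustrative sketch (assumes EBIU_AMGCTL is defined earlier in this
+** file): enabling CLKOUT together with all four asynchronous banks.
+*/
+#if 0
+*(volatile unsigned short *)EBIU_AMGCTL = AMCKEN | AMBEN_ALL;
+#endif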
+
+/* EBIU_AMBCTL0 Masks */
+#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
+#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
+#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
+#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
+#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
+#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
+#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
+#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
+#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
+#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
+#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
+#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
+#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
+#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
+#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
+#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
+#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
+#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
+#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
+#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
+#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
+#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
+#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
+#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
+#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
+#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
+#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
+#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
+#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
+#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
+#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
+#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
+#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
+#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
+#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
+#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
+
+#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
+#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
+#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
+#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
+#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
+#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
+#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
+#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
+#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
+#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
+#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
+#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
+#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
+#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
+#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
+#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
+#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
+#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
+#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
+#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
+#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
+#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
+#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
+#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
+#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
+#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
+#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
+#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
+#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
+#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
+#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
+#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
+#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
+#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
+#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
+#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
+
+/* EBIU_AMBCTL1 Masks */
+#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
+#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
+#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
+#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
+#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
+#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
+#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
+#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
+#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
+#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
+#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
+#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
+#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
+#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
+#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
+#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
+#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
+#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
+#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
+#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
+#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
+#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
+#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
+#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
+#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
+#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
+#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
+#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
+#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
+#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
+#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
+#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
+#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
+#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
+#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
+#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
+
+#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
+#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
+#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
+#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
+#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
+#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
+#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
+#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
+#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
+#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
+#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
+#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
+#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
+#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
+#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
+#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
+#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
+#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
+#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
+#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
+#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
+#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
+#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
+#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
+#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
+#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
+#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
+#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
+#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
+#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
+#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
+#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
+#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
+#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
+#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
+#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
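+
+/* Illustrative sketch (assumes EBIU_AMBCTL0 is defined earlier in this
+** file): each control register covers two banks, so bank 0 and bank 1
+** timing fields are ORed into a single 32-bit value.
+*/
+#if 0
+*(volatile unsigned long *)EBIU_AMBCTL0 =
+	B0RAT_8 | B0WAT_8 | B0ST_2 | B0HT_1 |
+	B1RAT_8 | B1WAT_8 | B1ST_2 | B1HT_1;
+#endif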
+
+
+/* ********************** SDRAM CONTROLLER MASKS **********************************************/
+/* EBIU_SDGCTL Masks */
+#define SCTLE 0x00000001 /* Enable SDRAM Signals */
+#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
+#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
+#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
+#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
+#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
+#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
+#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
+#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
+#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
+#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
+#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
+#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
+#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
+#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
+#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
+#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
+#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
+#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
+#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
+#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
+#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
+#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
+#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
+#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
+#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
+#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
+#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
+#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
+#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
+#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
+#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
+#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
+#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
+#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
+#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
+#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
+#define EBUFE 0x02000000 /* Enable External Buffering Timing */
+#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
+#define EMREN 0x10000000 /* Extended Mode Register Enable */
+#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
+#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
+
+/* EBIU_SDBCTL Masks */
+#define EBE 0x0001 /* Enable SDRAM External Bank */
+#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
+#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
+#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
+#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
+#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
+#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
+#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
+#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
+#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
+#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
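+
+/* Illustrative sketch (assumes EBIU_SDBCTL is defined earlier in this
+** file): describing an enabled 64MB external bank with 10-bit column
+** addressing.
+*/
+#if 0
+*(volatile unsigned short *)EBIU_SDBCTL = EBE | EBSZ_64 | EBCAW_10;
+#endif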
+
+/* EBIU_SDSTAT Masks */
+#define SDCI 0x0001 /* SDRAM Controller Idle */
+#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
+#define SDPUA 0x0004 /* SDRAM Power-Up Active */
+#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
+#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
+#define BGSTAT 0x0020 /* Bus Grant Status */
+
+
+/* ************************** DMA CONTROLLER MASKS ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
+#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
+#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
+#define PMAP_PPI 0x0000 /* PPI Port DMA */
+#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
+#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
+#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
+#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
+#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
+#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
+#define PMAP_SPI 0x7000 /* SPI Port DMA */
+#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
+#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
+#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
+#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
+
+/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/* PPI_CONTROL Masks */
+#define PORT_EN 0x0001 /* PPI Port Enable */
+#define PORT_DIR 0x0002 /* PPI Port Direction */
+#define XFR_TYPE 0x000C /* PPI Transfer Type */
+#define PORT_CFG 0x0030 /* PPI Port Configuration */
+#define FLD_SEL 0x0040 /* PPI Active Field Select */
+#define PACK_EN 0x0080 /* PPI Packing Mode */
+#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
+#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
+#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
+#define DLEN_8 0x0000 /* Data Length = 8 Bits */
+#define DLEN_10 0x0800 /* Data Length = 10 Bits */
+#define DLEN_11 0x1000 /* Data Length = 11 Bits */
+#define DLEN_12 0x1800 /* Data Length = 12 Bits */
+#define DLEN_13 0x2000 /* Data Length = 13 Bits */
+#define DLEN_14 0x2800 /* Data Length = 14 Bits */
+#define DLEN_15 0x3000 /* Data Length = 15 Bits */
+#define DLEN_16 0x3800 /* Data Length = 16 Bits */
+#define DLENGTH 0x3800 /* PPI Data Length */
+#define POLC 0x4000 /* PPI Clock Polarity */
+#define POLS 0x8000 /* PPI Frame Sync Polarity */
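+
+/* Illustrative sketch (assumes PPI_CONTROL is defined earlier in this
+** file): an 8-bit receive configuration with packing enabled; PORT_DIR
+** is left clear for receive mode.
+*/
+#if 0
+*(volatile unsigned short *)PPI_CONTROL = PORT_EN | PACK_EN | DLEN_8;
+#endif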
+
+/* PPI_STATUS Masks */
+#define FLD 0x0400 /* Field Indicator */
+#define FT_ERR 0x0800 /* Frame Track Error */
+#define OVR 0x1000 /* FIFO Overflow Error */
+#define UNDR 0x2000 /* FIFO Underrun Error */
+#define ERR_DET 0x4000 /* Error Detected Indicator */
+#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
+
+
+/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
+#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
+#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
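+
+/* Illustrative sketch (assumes TWI_CLKDIV is defined earlier in this
+** file): with the prescaled 10MHz reference, roughly 100 reference
+** periods per SCL cycle give about 100kHz, split evenly between the low
+** and high phases as the usage note above shows.
+*/
+#if 0
+*(volatile unsigned short *)TWI_CLKDIV = CLKLOW(50) | CLKHI(50);
+#endif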
+
+/* TWI_PRESCALE Masks */
+#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
+#define TWI_ENA 0x0080 /* TWI Enable */
+#define SCCB 0x0200 /* SCCB Compatibility Enable */
+
+/* TWI_SLAVE_CTL Masks */
+#define SEN 0x0001 /* Slave Enable */
+#define SADD_LEN 0x0002 /* Slave Address Length */
+#define STDVAL 0x0004 /* Slave Transmit Data Valid */
+#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
+#define GEN 0x0010 /* General Call Address Matching Enabled */
+
+/* TWI_SLAVE_STAT Masks */
+#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
+#define GCALL 0x0002 /* General Call Indicator */
+
+/* TWI_MASTER_CTL Masks */
+#define MEN 0x0001 /* Master Mode Enable */
+#define MADD_LEN 0x0002 /* Master Address Length */
+#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
+#define FAST 0x0008 /* Use Fast Mode Timing Specs */
+#define STOP 0x0010 /* Issue Stop Condition */
+#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
+#define DCNT 0x3FC0 /* Data Bytes To Transfer */
+#define SDAOVR 0x4000 /* Serial Data Override */
+#define SCLOVR 0x8000 /* Serial Clock Override */
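+
+/* Illustrative sketch (assumes TWI_MASTER_CTL is defined earlier in this
+** file): a 2-byte master-mode transmit; DCNT occupies bits 6-13, so the
+** byte count is shifted into place, and MDIR stays clear for transmit.
+*/
+#if 0
+*(volatile unsigned short *)TWI_MASTER_CTL = MEN | FAST | (2 << 6);
+#endif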
+
+/* TWI_MASTER_STAT Masks */
+#define MPROG 0x0001 /* Master Transfer In Progress */
+#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
+#define ANAK 0x0004 /* Address Not Acknowledged */
+#define DNAK 0x0008 /* Data Not Acknowledged */
+#define BUFRDERR 0x0010 /* Buffer Read Error */
+#define BUFWRERR 0x0020 /* Buffer Write Error */
+#define SDASEN 0x0040 /* Serial Data Sense */
+#define SCLSEN 0x0080 /* Serial Clock Sense */
+#define BUSBUSY 0x0100 /* Bus Busy Indicator */
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
+#define SINIT 0x0001 /* Slave Transfer Initiated */
+#define SCOMP 0x0002 /* Slave Transfer Complete */
+#define SERR 0x0004 /* Slave Transfer Error */
+#define SOVF 0x0008 /* Slave Overflow */
+#define MCOMP 0x0010 /* Master Transfer Complete */
+#define MERR 0x0020 /* Master Transfer Error */
+#define XMTSERV 0x0040 /* Transmit FIFO Service */
+#define RCVSERV 0x0080 /* Receive FIFO Service */
+
+/* TWI_FIFO_CTRL Masks */
+#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
+#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
+#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
+#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
+
+/* TWI_FIFO_STAT Masks */
+#define XMTSTAT 0x0003 /* Transmit FIFO Status */
+#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
+#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
+#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
+
+#define RCVSTAT 0x000C /* Receive FIFO Status */
+#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
+#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
+#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
+
+
+/* ******************* PIN CONTROL REGISTER MASKS ************************/
+/* PORT_MUX Masks */
+#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
+#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
+#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
+
+#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
+#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
+#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
+#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
+
+#define PFDE 0x0008 /* Port F DMA Request Enable */
+#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
+#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
+
+#define PFTE 0x0010 /* Port F Timer Enable */
+#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
+#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
+
+#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
+#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
+#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
+
+#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
+#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
+#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
+
+#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
+#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
+#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
+
+#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
+#define PFFE_TIMER 0x0000 /* Enable TMR2 */
+#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
+
+#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
+#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
+#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
+
+#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
+#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
+#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
+
+#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
+#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
+#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
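+
+/* Illustrative sketch (assumes PORT_MUX is defined earlier in this
+** file): routing Port J to CAN RX/TX and the PFTE pins to TMR7:6; note
+** the positioned PJCE_CAN value equals PJCE(1).
+*/
+#if 0
+*(volatile unsigned short *)PORT_MUX = PJCE_CAN | PFTE_TIMER;
+#endif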
+
+
+/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
+/* HDMAx_CTL Masks */
+#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
+#define REP 0x0002 /* HDMA Request Polarity */
+#define UTE 0x0004 /* Urgency Threshold Enable */
+#define OIE 0x0010 /* Overflow Interrupt Enable */
+#define BDIE 0x0020 /* Block Done Interrupt Enable */
+#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
+#define DRQ 0x0300 /* HDMA Request Type */
+#define DRQ_NONE 0x0000 /* No Request */
+#define DRQ_SINGLE 0x0100 /* Channels Request Single */
+#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
+#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
+#define RBC 0x1000 /* Reload BCNT With IBCNT */
+#define PS 0x2000 /* HDMA Pin Status */
+#define OI 0x4000 /* Overflow Interrupt Generated */
+#define BDI 0x8000 /* Block Done Interrupt Generated */
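+
+/* Illustrative sketch: enabling handshake DMA 1 with block-done
+** interrupts and the default multi-request type, using the HMDMA1
+** register defined above.
+*/
+#if 0
+*(volatile unsigned short *)HMDMA1_CONTROL = HMDMAEN | BDIE | DRQ_MULTI;
+#endif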
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
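+
+/* Illustrative sketch: the entries above are code addresses, so they are
+** typically invoked through a function pointer cast; the void/void
+** signature shown here is an assumption for illustration only.
+*/
+#if 0
+void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
+bootrom_reset();
+#endif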
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define PGDE_UART PFDE_UART
+#define PGDE_DMA PFDE_DMA
+#define CKELOW SCKELOW
+
+/* HOST Port Registers */
+
+#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
+#define HOST_STATUS 0xffc03404 /* HOST Status Register */
+#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define CNT_CONFIG 0xffc03500 /* Configuration Register */
+#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
+#define CNT_STATUS 0xffc03508 /* Status Register */
+#define CNT_COMMAND 0xffc0350c /* Command Register */
+#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
+#define CNT_COUNTER 0xffc03514 /* Counter Register */
+#define CNT_MAX 0xffc03518 /* Maximal Count Register */
+#define CNT_MIN 0xffc0351c /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
+#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
+#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
+#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
+#define SECURE_CONTROL 0xffc03624 /* Secure Control */
+#define SECURE_STATUS 0xffc03628 /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* Motor Control PWM Registers */
+
+#define PWM_CTRL 0xffc03700 /* PWM Control Register */
+#define PWM_STAT 0xffc03704 /* PWM Status Register */
+#define PWM_TM 0xffc03708 /* PWM Period Register */
+#define PWM_DT 0xffc0370c /* PWM Dead Time Register */
+#define PWM_GATE 0xffc03710 /* PWM Chopping Control */
+#define PWM_CHA 0xffc03714 /* PWM Channel A Duty Control */
+#define PWM_CHB 0xffc03718 /* PWM Channel B Duty Control */
+#define PWM_CHC 0xffc0371c /* PWM Channel C Duty Control */
+#define PWM_SEG 0xffc03720 /* PWM Crossover and Output Enable */
+#define PWM_SYNCWT 0xffc03724 /* PWM Sync Pulse Width Control */
+#define PWM_CHAL 0xffc03728 /* PWM Channel AL Duty Control (SR mode only) */
+#define PWM_CHBL 0xffc0372c /* PWM Channel BL Duty Control (SR mode only) */
+#define PWM_CHCL 0xffc03730 /* PWM Channel CL Duty Control (SR mode only) */
+#define PWM_LSI 0xffc03734 /* PWM Low Side Invert (SR mode only) */
+#define PWM_STAT2 0xffc03738 /* PWM Status Register 2 */
+
+
+/* ********************************************************** */
+/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
+/* and MULTI BIT READ MACROS */
+/* ********************************************************** */
+
+/* Bit masks for HOST_CONTROL */
+
+#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
+#define HOST_CNTR_nHOST_EN 0x0
+#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
+#define HOST_CNTR_nHOST_END 0x0
+#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
+#define HOST_CNTR_nDATA_SIZE 0x0
+#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
+#define HOST_CNTR_nHOST_RST 0x0
+#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
+#define HOST_CNTR_nHRDY_OVR 0x0
+#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
+#define HOST_CNTR_nINT_MODE 0x0
+#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
+#define HOST_CNTR_nBT_EN 0x0
+#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
+#define HOST_CNTR_nEHW 0x0
+#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
+#define HOST_CNTR_nEHR 0x0
+#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
+#define HOST_CNTR_nBDR 0x0
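+
+/* Illustrative sketch (HOST_CONTROL is defined in the register list
+** further up this file): the nXXX companions above are the cleared
+** (zero) forms, letting a configuration name every field explicitly.
+*/
+#if 0
+*(volatile unsigned short *)HOST_CONTROL =
+	HOST_CNTR_HOST_EN | HOST_CNTR_EHW | HOST_CNTR_EHR | HOST_CNTR_nINT_MODE;
+#endif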
+
+/* Bit masks for HOST_STATUS */
+
+#define HOST_STAT_READY 0x1 /* DMA Ready */
+#define HOST_STAT_nREADY 0x0
+#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
+#define HOST_STAT_nFIFOFULL 0x0
+#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
+#define HOST_STAT_nFIFOEMPTY 0x0
+#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
+#define HOST_STAT_nCOMPLETE 0x0
+#define HOST_STAT_HSHK 0x10 /* Host Handshake */
+#define HOST_STAT_nHSHK 0x0
+#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
+#define HOST_STAT_nTIMEOUT 0x0
+#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
+#define HOST_STAT_nHIRQ 0x0
+#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
+#define HOST_STAT_nALLOW_CNFG 0x0
+#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
+#define HOST_STAT_nDMA_DIR 0x0
+#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
+#define HOST_STAT_nBTE 0x0
+#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
+#define HOST_STAT_nHOSTRD_DONE 0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define EMUDABL 0x1 /* Emulation Disable. */
+#define nEMUDABL 0x0
+#define RSTDABL 0x2 /* Reset Disable */
+#define nRSTDABL 0x0
+#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
+#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
+#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
+#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
+#define nDMA0OVR 0x0
+#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
+#define nDMA1OVR 0x0
+#define EMUOVR 0x4000 /* Emulation Override */
+#define nEMUOVR 0x0
+#define OTPSEN 0x8000 /* OTP Secrets Enable. */
+#define nOTPSEN 0x0
+#define L2DABL 0x70000 /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define SECURE0 0x1 /* SECURE 0 */
+#define nSECURE0 0x0
+#define SECURE1 0x2 /* SECURE 1 */
+#define nSECURE1 0x0
+#define SECURE2 0x4 /* SECURE 2 */
+#define nSECURE2 0x0
+#define SECURE3 0x8 /* SECURE 3 */
+#define nSECURE3 0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define SECMODE 0x3 /* Secured Mode Control State */
+#define NMI 0x4 /* Non Maskable Interrupt */
+#define nNMI 0x0
+#define AFVALID 0x8 /* Authentication Firmware Valid */
+#define nAFVALID 0x0
+#define AFEXIT 0x10 /* Authentication Firmware Exit */
+#define nAFEXIT 0x0
+#define SECSTAT 0xe0 /* Secure Status */
#endif /* _DEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
deleted file mode 100644
index 5f84913dcd9..00000000000
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF51X_H
-#define _DEF_BF51X_H
-
-
-/* ************************************************************** */
-/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x */
-/* ************************************************************** */
-
-/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
-#define PLL_CTL 0xFFC00000 /* PLL Control Register */
-#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
-#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
-#define PLL_STAT 0xFFC0000C /* PLL Status Register */
-#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
-#define CHIPID 0xFFC00014 /* Device ID Register */
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define SWRST 0xFFC00100 /* Software Reset Register */
-#define SYSCR 0xFFC00104 /* System Configuration Register */
-#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
-
-#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
-#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
-#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
-#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
-#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
-#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
-#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment register4 */
-#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment register5 */
-#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment register6 */
-#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment register7 */
-#define SIC_ISR1 0xFFC00160 /* Interrupt Statur register */
-#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
-#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
-#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
-#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
-
-
-/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define RTC_STAT 0xFFC00300 /* RTC Status Register */
-#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
-#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
-#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
-#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
-#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
-#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
-
-
-/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
-#define UART0_THR 0xFFC00400 /* Transmit Holding register */
-#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
-#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
-#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
-#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
-#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
-#define UART0_LCR 0xFFC0040C /* Line Control Register */
-#define UART0_MCR 0xFFC00410 /* Modem Control Register */
-#define UART0_LSR 0xFFC00414 /* Line Status Register */
-#define UART0_MSR 0xFFC00418 /* Modem Status Register */
-#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
-#define UART0_GCTL 0xFFC00424 /* Global Control Register */
-
-/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
-#define SPI0_REGBASE 0xFFC00500
-#define SPI0_CTL 0xFFC00500 /* SPI Control Register */
-#define SPI0_FLG 0xFFC00504 /* SPI Flag register */
-#define SPI0_STAT 0xFFC00508 /* SPI Status register */
-#define SPI0_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
-#define SPI0_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
-#define SPI0_BAUD 0xFFC00514 /* SPI Baud rate Register */
-#define SPI0_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
-
-/* SPI1 Controller (0xFFC03400 - 0xFFC034FF) */
-#define SPI1_REGBASE 0xFFC03400
-#define SPI1_CTL 0xFFC03400 /* SPI Control Register */
-#define SPI1_FLG 0xFFC03404 /* SPI Flag register */
-#define SPI1_STAT 0xFFC03408 /* SPI Status register */
-#define SPI1_TDBR 0xFFC0340C /* SPI Transmit Data Buffer Register */
-#define SPI1_RDBR 0xFFC03410 /* SPI Receive Data Buffer Register */
-#define SPI1_BAUD 0xFFC03414 /* SPI Baud rate Register */
-#define SPI1_SHADOW 0xFFC03418 /* SPI_RDBR Shadow Register */
-
-/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
-#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
-#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
-#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
-#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
-
-#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
-#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
-#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
-#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
-
-#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
-#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
-#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
-#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
-
-#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
-#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
-#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
-#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
-
-#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
-#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
-#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
-#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
-
-#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
-#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
-#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
-#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
-
-#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
-#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
-#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
-#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
-
-#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
-#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
-#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
-#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
-
-#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
-#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
-#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
-#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
-#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
-#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
-#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
-#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
-#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
-#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
-#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
-#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
-#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
-#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
-#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
-#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
-#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
-#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
-#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
-#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
-#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
-#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
-#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
-#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
-#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
-#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
-#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
-#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
-#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
-#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
-#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
-#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
-#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
-#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
-#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
-#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
-#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
-#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
-#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
-#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
-#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
-#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
-#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Transmit Configuration 1 Register */
-#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Transmit Configuration 2 Register */
-#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
-#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
-#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
-#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
-#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
-#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
-#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
-#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
-#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
-#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
-#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
-#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
-#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
-#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
-#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
-#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
-#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
-#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
-#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
-#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
-
-/* DMA Traffic Control Registers */
-#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
-#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
-#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
-#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
-#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
-#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
-#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
-#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
-#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
-#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
-#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
-#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
-#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
-#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
-
-#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
-#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
-#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
-#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
-#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
-#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
-#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
-#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
-#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
-#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
-#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
-#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
-#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
-
-#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
-#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
-#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
-#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
-#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
-#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
-#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
-#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
-#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
-#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
-#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
-#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
-#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
-
-#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
-#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
-#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
-#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
-#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
-#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
-#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
-#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
-#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
-#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
-#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
-#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
-#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
-
-#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
-#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
-#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
-#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
-#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
-#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
-#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
-#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
-#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
-#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
-#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
-#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
-#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
-
-#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
-#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
-#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
-#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
-#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
-#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
-#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
-#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
-#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
-#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
-#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
-#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
-#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
-
-#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
-#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
-#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
-#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
-#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
-#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
-#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
-#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
-#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
-#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
-#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
-#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
-#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
-
-#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
-#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
-#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
-#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
-#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
-#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
-#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
-#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
-#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
-#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
-#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
-#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
-#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
-
-#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
-#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
-#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
-#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
-#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
-#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
-#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
-#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
-#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
-#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
-#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
-#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
-#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
-
-#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
-#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
-#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
-#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
-#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
-#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
-#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
-#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
-#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
-#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
-#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
-#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
-#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
-
-#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
-#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
-#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
-#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
-#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
-#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
-#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
-#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
-#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
-#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
-#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
-#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
-#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
-
-#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
-#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
-#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
-#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
-#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
-#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
-#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
-#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
-#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
-#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
-#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
-#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
-#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
-
-#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
-#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
-#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
-#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
-#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
-#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
-#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
-#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
-#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
-#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
-#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
-#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
-#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
-
-#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
-#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
-#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
-#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
-#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
-#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
-#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
-#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
-#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
-#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
-#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
-#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
-#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
-
-#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
-#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
-#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
-#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
-#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
-#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
-#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
-#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
-#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
-#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
-#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
-#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
-#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
-
-#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
-#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
-#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
-#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
-#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
-#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
-#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
-#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
-#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
-#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
-#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
-#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
-#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
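
The MemDMA register pairs above encode the programming model directly: a source channel is configured for reading, a destination channel for writing, and both are enabled together. A minimal polled sketch of a 1-D memory-to-memory copy, assuming the usual Blackfin DMA_CONFIG and IRQ_STATUS bit names (DMAEN, WNR, WDSIZE_16, DMA_DONE) that this header family defines elsewhere; kernel code would normally go through the arch DMA API rather than raw MMR pokes:

#define REG16(a) (*(volatile unsigned short *)(a))  /* helpers reused by the */
#define REG32(a) (*(volatile unsigned long *)(a))   /* later sketches below  */

static void mdma0_copy16(void *dst, const void *src, unsigned short nwords)
{
        REG32(MDMA_S0_START_ADDR) = (unsigned long)src;
        REG16(MDMA_S0_X_COUNT)    = nwords;
        REG16(MDMA_S0_X_MODIFY)   = 2;                  /* 2 bytes/element */
        REG32(MDMA_D0_START_ADDR) = (unsigned long)dst;
        REG16(MDMA_D0_X_COUNT)    = nwords;
        REG16(MDMA_D0_X_MODIFY)   = 2;
        REG16(MDMA_S0_CONFIG) = DMAEN | WDSIZE_16;        /* read side  */
        REG16(MDMA_D0_CONFIG) = DMAEN | WNR | WDSIZE_16;  /* write side */
        while (!(REG16(MDMA_D0_IRQ_STATUS) & DMA_DONE))
                ;                               /* poll for completion */
        REG16(MDMA_D0_IRQ_STATUS) = DMA_DONE;   /* W1C the done bit */
}
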
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
-#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
-#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
-#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
-#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
-#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
-
-
-/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
-#define TWI0_REGBASE 0xFFC01400
-#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
-#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
-#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
-#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
-#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
-#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
-#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
-#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
-#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
-#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
-#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
-#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
-#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
-#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
-#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
-#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
-#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
-#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
-#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
-#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
-#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
-#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
-#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
-#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
-#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
-#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
-#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
-#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
-#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
-#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
-#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
-#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
-#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
-#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
-#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
-#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
-#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
-#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
-#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
-#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
-#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
-#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
-#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
-#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
-#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
-#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
-#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
-#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
-#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
-#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
-
-
-/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
-#define UART1_THR 0xFFC02000 /* Transmit Holding register */
-#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
-#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
-#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
-#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
-#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
-#define UART1_LCR 0xFFC0200C /* Line Control Register */
-#define UART1_MCR 0xFFC02010 /* Modem Control Register */
-#define UART1_LSR 0xFFC02014 /* Line Status Register */
-#define UART1_MSR 0xFFC02018 /* Modem Status Register */
-#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
-#define UART1_GCTL 0xFFC02024 /* Global Control Register */
-
-
-/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
-#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
-#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
-#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
-#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
-
-
-/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
-#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
-#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
-#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
-#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
-#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
-#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
-#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
-
-#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
-#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
-#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
-#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
-#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
-#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
-#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
-
-
-/* GPIO Pin Mux (0xFFC03210 - 0xFFC03288) */
-#define PORTF_MUX 0xFFC03210 /* Port F mux control */
-#define PORTG_MUX 0xFFC03214 /* Port G mux control */
-#define PORTH_MUX 0xFFC03218 /* Port H mux control */
-#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
-#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
-#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
-#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
-#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
-#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
-#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
-#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer: All macros are intended to make C and Assembly code more readable.
-** Use these macros carefully, as any that do left shifts for field
-** depositing will result in the lower order bits being destroyed. Any
-** macro that shifts left to properly position the bit-field should be
-** used as part of an OR to initialize a register and NOT as a dynamic
-** modifier UNLESS the lower order bits are saved and ORed back in when
-** the macro is used.
-*************************************************************************************/
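
To make that warning concrete, here is a hedged sketch of the two patterns it allows, using the REG16() helper from the MemDMA sketch above and the CLKLOW()/CLKHI() field macros defined further down in this header (CLKHI() left-shifts its argument into bits 15:8):

static void twi_clkdiv_patterns(void)
{
        /* Safe: one-shot initialization, every field written in one OR */
        REG16(TWI0_CLKDIV) = CLKLOW(50) | CLKHI(50);

        /* Unsafe: "REG16(TWI0_CLKDIV) = CLKHI(40);" would zero the CLKLOW
         * field. Safe dynamic update: save the low-order bits, OR back in. */
        REG16(TWI0_CLKDIV) = (REG16(TWI0_CLKDIV) & 0x00FF) | CLKHI(40);
}
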
-
-/* CHIPID Masks */
-#define CHIPID_VERSION 0xF0000000
-#define CHIPID_FAMILY 0x0FFFF000
-#define CHIPID_MANUFACTURE 0x00000FFE
-
-/* SWRST Masks */
-#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
-#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
-#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
-#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
-#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
-
-/* SYSCR Masks */
-#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
-#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
-
-
-/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
-
-#if 0
-#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
-
-#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
-#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
-#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
-#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
-#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
-
-#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
-#define IRQ_TWI 0x00000200 /* TWI Interrupt */
-#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
-#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
-#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
-#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
-#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
-#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
-
-#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
-#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
-#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
-#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
-#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
-#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
-#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
-#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
-#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
-#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
-
-#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
-#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
-#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
-#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
-#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
-#define IRQ_DMA12 0x20000000 /* DMA Channel 12 (MDMA1 Source) RX Interrupt */
-#define IRQ_DMA13 0x20000000 /* DMA Channel 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14 0x40000000 /* DMA Channel 14 (MDMA0 Source) RX Interrupt */
-#define IRQ_DMA15 0x40000000 /* DMA Channel 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
-#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
-#endif
-
-/* SIC_IAR0 Macros */
-#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
-#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
-#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
-#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
-#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
-#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
-#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
-#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */
-
-/* SIC_IAR1 Macros */
-#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
-#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
-#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
-#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
-#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
-#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
-#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
-#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */
-
-/* SIC_IAR2 Macros */
-#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
-#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
-#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
-#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
-#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
-#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
-#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
-#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */
-
-/* SIC_IAR3 Macros */
-#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
-#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
-#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
-#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
-#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
-#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
-#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
-#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */
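
Per the field-deposit disclaimer above, the Px_IVG() macros are meant for full-register initialization, never for ORing into a live register. A sketch assuming the SIC_IAR0 register address defined earlier in this header:

static void sic_route_irqs(void)
{
        /* Peripherals 0-3 to IVG7, 4-5 to IVG8, 6-7 to IVG9, in one write */
        REG32(SIC_IAR0) = P0_IVG(7) | P1_IVG(7) | P2_IVG(7) | P3_IVG(7) |
                          P4_IVG(8) | P5_IVG(8) | P6_IVG(9) | P7_IVG(9);
}
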
-
-
-/* SIC_IMASK Masks */
-#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
-#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
-#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
-#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
-
-/* SIC_IWR Masks */
-#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
-#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
-#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
-#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
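
Note the asymmetry: SIC_MASK() and IWR_ENABLE() build single-bit set masks, while SIC_UNMASK() and IWR_DISABLE() build all-ones clear masks, so the former are ORed and the latter ANDed. A sketch assuming the SIC_IMASK and SIC_IWR register addresses defined earlier in this header:

static void sic_mask_peripheral(unsigned int x)
{
        REG32(SIC_IMASK) |= SIC_MASK(x);      /* set bit x */
        REG32(SIC_IWR)   &= IWR_DISABLE(x);   /* and drop its wakeup */
}

static void sic_unmask_peripheral(unsigned int x)
{
        REG32(SIC_IMASK) &= SIC_UNMASK(x);    /* clear bit x */
        REG32(SIC_IWR)   |= IWR_ENABLE(x);
}
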
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks */
-#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
-#define STB 0x04 /* Stop Bits */
-#define PEN 0x08 /* Parity Enable */
-#define EPS 0x10 /* Even Parity Select */
-#define STP 0x20 /* Stick Parity */
-#define SB 0x40 /* Set Break */
-#define DLAB 0x80 /* Divisor Latch Access */
-
-/* UARTx_MCR Mask */
-#define LOOP_ENA 0x10 /* Loopback Mode Enable */
-#define LOOP_ENA_P 0x04 /* Loopback Mode Enable Bit Position */
-
-/* UARTx_LSR Masks */
-#define DR 0x01 /* Data Ready */
-#define OE 0x02 /* Overrun Error */
-#define PE 0x04 /* Parity Error */
-#define FE 0x08 /* Framing Error */
-#define BI 0x10 /* Break Interrupt */
-#define THRE 0x20 /* THR Empty */
-#define TEMT 0x40 /* TSR and UART_THR Empty */
-
-/* UARTx_IER Masks */
-#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
-#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI 0x04 /* Enable RX Status Interrupt */
-
-/* UARTx_IIR Masks */
-#define NINT 0x01 /* Pending Interrupt */
-#define IIR_TX_READY 0x02 /* UART_THR empty */
-#define IIR_RX_READY 0x04 /* Receive data ready */
-#define IIR_LINE_CHANGE 0x06 /* Receive line status */
-#define IIR_STATUS 0x06 /* Highest Priority Pending Interrupt */
-
-/* UARTx_GCTL Masks */
-#define UCEN 0x01 /* Enable UARTx Clocks */
-#define IREN 0x02 /* Enable IrDA Mode */
-#define TPOLC 0x04 /* IrDA TX Polarity Change */
-#define RPOLC 0x08 /* IrDA RX Polarity Change */
-#define FPE 0x10 /* Force Parity Error On Transmit */
-#define FFE 0x20 /* Force Framing Error On Transmit */
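
Taken together, the GCTL/LCR/DLL/DLH bits describe the canonical bring-up order: clock the UART, open the divisor latch with DLAB, program the divisor, then close the latch with the final frame format. A sketch using the UART1 registers listed above; the divisor value is caller-supplied since it depends on SCLK:

static void uart1_init_8n1(unsigned short divisor)
{
        REG16(UART1_GCTL) = UCEN;            /* enable UART clocks first */
        REG16(UART1_LCR)  = DLAB;            /* DLAB=1: DLL/DLH overlay THR/IER */
        REG16(UART1_DLL)  = divisor & 0xFF;
        REG16(UART1_DLH)  = divisor >> 8;
        REG16(UART1_LCR)  = WLS(8);          /* 8N1; clearing DLAB restores THR/RBR */
        REG16(UART1_IER)  = ERBFI;           /* interrupt on received data */
}
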
-
-
-/* **************** GENERAL PURPOSE TIMER MASKS **********************/
-/* TIMER_ENABLE Masks */
-#define TIMEN0 0x0001 /* Enable Timer 0 */
-#define TIMEN1 0x0002 /* Enable Timer 1 */
-#define TIMEN2 0x0004 /* Enable Timer 2 */
-#define TIMEN3 0x0008 /* Enable Timer 3 */
-#define TIMEN4 0x0010 /* Enable Timer 4 */
-#define TIMEN5 0x0020 /* Enable Timer 5 */
-#define TIMEN6 0x0040 /* Enable Timer 6 */
-#define TIMEN7 0x0080 /* Enable Timer 7 */
-
-/* TIMER_DISABLE Masks */
-#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
-#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
-#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
-#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
-#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
-#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
-#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
-#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
-
-/* TIMER_STATUS Masks */
-#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
-#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
-#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
-#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
-#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
-#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
-#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
-#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
-#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
-#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
-#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
-#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
-#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
-#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
-#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
-#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
-#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
-#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
-#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
-#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
-#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
-#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
-#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
-#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks */
-#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
-#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
-#define EXT_CLK 0x0003 /* External Clock Mode */
-#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
-#define PERIOD_CNT 0x0008 /* Period Count */
-#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
-#define TIN_SEL 0x0020 /* Timer Input Select */
-#define OUT_DIS 0x0040 /* Output Pad Disable */
-#define CLK_SEL 0x0080 /* Timer Clock Select */
-#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
-#define EMU_RUN 0x0200 /* Emulation Behavior Select */
-#define ERR_TYP 0xC000 /* Error Type */
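
A sketch of continuous PWM on timer 0 built from these CONFIG bits, assuming the TIMER0_CONFIG/TIMER0_PERIOD/TIMER0_WIDTH and TIMER_ENABLE register addresses defined earlier in this header; TIMER_ENABLE is write-1-to-set, so only the named timer is touched:

static void timer0_pwm(unsigned long period, unsigned long width)
{
        REG16(TIMER0_CONFIG) = PWM_OUT | PERIOD_CNT | PULSE_HI;
        REG32(TIMER0_PERIOD) = period;    /* in timer clock cycles */
        REG32(TIMER0_WIDTH)  = width;     /* high time per period */
        REG16(TIMER_ENABLE)  = TIMEN0;    /* W1S: starts timer 0 only */
}
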
-
-
-/* ****************** GPIO PORTS F, G, H MASKS ***********************/
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
-/* Port F Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* Port G Masks */
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* Port H Masks */
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
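
The PFx/PGx/PHx masks plug straight into the PORTxIO registers listed earlier; SET/CLEAR/TOGGLE are write-1 registers, so pin updates need no read-modify-write. A sketch driving an arbitrary port G pin (PG3 here is purely illustrative):

static void portg_drive(unsigned short pin, int level)   /* e.g. pin = PG3 */
{
        REG16(PORTG_FER)    &= ~pin;   /* GPIO function, not peripheral */
        REG16(PORTGIO_INEN) &= ~pin;   /* input buffer off */
        REG16(PORTGIO_DIR)  |= pin;    /* drive as output */
        if (level)
                REG16(PORTGIO_SET) = pin;     /* W1S */
        else
                REG16(PORTGIO_CLEAR) = pin;   /* W1C */
}
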
-
-/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
-/* EBIU_AMGCTL Masks */
-#define AMCKEN 0x0001 /* Enable CLKOUT */
-#define AMBEN_NONE 0x0000 /* All Banks Disabled */
-#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
-#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
-#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
-#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
-
-/* EBIU_AMBCTL0 Masks */
-#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
-#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
-#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
-#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
-#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
-#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
-#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
-#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
-#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
-#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
-#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
-#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
-#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
-#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
-#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
-#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
-#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
-#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
-#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
-#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
-#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
-#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
-#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
-#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
-#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
-#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
-#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
-#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
-#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
-#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
-#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
-#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
-#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
-#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
-#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
-#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
-
-#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
-#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
-#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
-#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
-#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
-#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
-#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
-#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
-#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
-#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
-#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
-#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
-#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
-#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
-#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
-#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
-#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
-#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
-#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
-#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
-#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
-#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
-#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
-#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
-#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
-#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
-#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
-#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
-#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
-#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
-#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
-#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
-#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
-#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
-#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
-#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
-
-/* EBIU_AMBCTL1 Masks */
-#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
-#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
-#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
-#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
-#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
-#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
-#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
-#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
-#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
-#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
-#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
-#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
-#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
-#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
-#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
-#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
-#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
-#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
-#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
-#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
-#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
-#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
-#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
-#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
-#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
-#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
-#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
-#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
-#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
-#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
-#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
-#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
-#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
-#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
-#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
-#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
-
-#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
-#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
-#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
-#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
-#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
-#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
-#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
-#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
-#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
-#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
-#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
-#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
-#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
-#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
-#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
-#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
-#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
-#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
-#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
-#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
-#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
-#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
-#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
-#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
-#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
-#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
-#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
-#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
-#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
-#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
-#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
-#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
-#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
-#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
-#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
-#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
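
The bank timing fields above are all shifted-value encodings, so EBIU_AMBCTLx is written once with the full set of choices ORed together and the banks are then switched on in EBIU_AMGCTL. A sketch with board-specific placeholder timings, assuming the EBIU_AMBCTL0 and EBIU_AMGCTL register addresses defined earlier in this header:

static void async_mem_init(void)
{
        /* Bank 0: 7-cycle accesses, modest setup/hold, RDY handshake on */
        REG32(EBIU_AMBCTL0) = B0WAT_7 | B0RAT_7 | B0ST_3 | B0HT_2 |
                              B0TT_2 | B0RDYEN;
        REG16(EBIU_AMGCTL)  = AMBEN_ALL | AMCKEN;   /* enable banks last */
}
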
-
-
-/* ********************** SDRAM CONTROLLER MASKS **********************************************/
-/* EBIU_SDGCTL Masks */
-#define SCTLE 0x00000001 /* Enable SDRAM Signals */
-#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
-#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
-#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
-#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
-#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
-#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
-#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
-#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
-#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
-#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
-#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
-#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
-#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
-#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
-#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
-#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
-#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
-#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
-#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
-#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
-#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
-#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
-#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
-#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
-#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
-#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
-#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
-#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
-#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
-#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
-#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
-#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
-#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
-#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
-#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
-#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
-#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
-#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
-#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
-#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
-#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
-#define EBUFE 0x02000000 /* Enable External Buffering Timing */
-#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
-#define EMREN 0x10000000 /* Extended Mode Register Enable */
-#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
-#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
-
-/* EBIU_SDBCTL Masks */
-#define EBE 0x0001 /* Enable SDRAM External Bank */
-#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
-#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
-#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
-#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
-#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
-#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
-#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
-#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
-#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
-#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
-
-/* EBIU_SDSTAT Masks */
-#define SDCI 0x0001 /* SDRAM Controller Idle */
-#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
-#define SDPUA 0x0004 /* SDRAM Power-Up Active */
-#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
-#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
-#define BGSTAT 0x0020 /* Bus Grant Status */
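
A rough bring-up sketch tying the EBIU_SD* registers together: geometry in SDBCTL, refresh rate in SDRRC, timing plus the power-up trigger in SDGCTL. All values are board-specific placeholders, and the EBIU_SDGCTL/EBIU_SDBCTL addresses are assumed from earlier in this header:

static void sdram_init(void)
{
        REG16(EBIU_SDBCTL) = EBE | EBSZ_64 | EBCAW_10;   /* 64MB, CAW=10 */
        REG16(EBIU_SDRRC)  = 0x03A0;                     /* placeholder rate */
        REG32(EBIU_SDGCTL) = SCTLE | CL_3 | TRAS_6 | TRP_3 |
                             TRCD_3 | TWR_2 | PSS;  /* power-up on next access */
        while (!(REG16(EBIU_SDSTAT) & SDCI))
                ;                                   /* wait for controller idle */
}
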
-
-
-/* ************************** DMA CONTROLLER MASKS ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
-#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
-#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
-#define PMAP_PPI 0x0000 /* PPI Port DMA */
-#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
-#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
-#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
-#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
-#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
-#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
-#define PMAP_SPI 0x7000 /* SPI Port DMA */
-#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
-#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
-#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
-#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
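
Since channels can be remapped, drivers conventionally verify the PMAP field before claiming a channel. A sketch against DMA channel 7's default SPI mapping:

static int dma7_is_spi(void)
{
        /* Mask down to the 4-bit PMAP field and compare */
        return (REG16(DMA7_PERIPHERAL_MAP) & PMAP) == PMAP_SPI;
}
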
-
-/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/* PPI_CONTROL Masks */
-#define PORT_EN 0x0001 /* PPI Port Enable */
-#define PORT_DIR 0x0002 /* PPI Port Direction */
-#define XFR_TYPE 0x000C /* PPI Transfer Type */
-#define PORT_CFG 0x0030 /* PPI Port Configuration */
-#define FLD_SEL 0x0040 /* PPI Active Field Select */
-#define PACK_EN 0x0080 /* PPI Packing Mode */
-#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
-#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
-#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
-#define DLEN_8 0x0000 /* Data Length = 8 Bits */
-#define DLEN_10 0x0800 /* Data Length = 10 Bits */
-#define DLEN_11 0x1000 /* Data Length = 11 Bits */
-#define DLEN_12 0x1800 /* Data Length = 12 Bits */
-#define DLEN_13 0x2000 /* Data Length = 13 Bits */
-#define DLEN_14 0x2800 /* Data Length = 14 Bits */
-#define DLEN_15 0x3000 /* Data Length = 15 Bits */
-#define DLEN_16 0x3800 /* Data Length = 16 Bits */
-#define DLENGTH 0x3800 /* PPI Data Length */
-#define POLC 0x4000 /* PPI Clock Polarity */
-#define POLS 0x8000 /* PPI Frame Sync Polarity */
-
-/* PPI_STATUS Masks */
-#define FLD 0x0400 /* Field Indicator */
-#define FT_ERR 0x0800 /* Frame Track Error */
-#define OVR 0x1000 /* FIFO Overflow Error */
-#define UNDR 0x2000 /* FIFO Underrun Error */
-#define ERR_DET 0x4000 /* Error Detected Indicator */
-#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
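
A sketch of the usual PPI pattern built from these bits: configure data length and packing first, set PORT_EN as the final step, and treat the error bits in PPI_STATUS as a sticky, write-1-to-clear summary:

static void ppi_start_rx_8bit(void)
{
        REG16(PPI_CONTROL)  = DLEN_8 | PACK_EN;   /* receive: PORT_DIR clear */
        REG16(PPI_CONTROL) |= PORT_EN;            /* enable last */
}

static int ppi_check_errors(void)
{
        unsigned short stat = REG16(PPI_STATUS) & (FT_ERR | OVR | UNDR);
        REG16(PPI_STATUS) = stat;                 /* W1C: acknowledge */
        return stat;
}
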
-
-
-/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI0_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-
-/* TWI_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
-
-/* TWI_SLAVE_CTL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWI_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWI_MASTER_CTL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
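
A polled master-write sketch using these bits and the TWI0 registers listed earlier; DCNT is a shifted field occupying bits 13:6, and the INT_STAT bits further down are write-1-to-clear. The two preloaded bytes and 7-bit address are illustrative:

static void twi0_write2(unsigned short addr7,
                        unsigned char b0, unsigned char b1)
{
        REG16(TWI0_MASTER_ADDR) = addr7;
        REG16(TWI0_XMT_DATA8)   = b0;            /* preload transmit FIFO */
        REG16(TWI0_XMT_DATA8)   = b1;
        REG16(TWI0_MASTER_CTL)  = (2 << 6) | FAST | MEN;   /* DCNT=2, go */
        while (!(REG16(TWI0_INT_STAT) & (MCOMP | MERR)))
                ;                                /* wait for done or error */
        REG16(TWI0_INT_STAT) = MCOMP | MERR;     /* W1C acknowledge */
}
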
-
-/* TWI_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWI_FIFO_CTRL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWI_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
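Because XMTSTAT and RCVSTAT are 2-bit fields, they must be masked out before comparing against the named states. A hedged polling sketch (the bfin_read_TWI_FIFO_STAT() accessor is assumed):

	/* spin until the 2-deep receive FIFO holds at least one byte */
	while ((bfin_read_TWI_FIFO_STAT() & RCVSTAT) == RCV_EMPTY)
		continue;
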
-
-/* ******************* PIN CONTROL REGISTER MASKS ************************/
-/* PORT_MUX Masks */
-#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
-#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
-#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
-
-#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
-#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
-#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
-#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
-
-#define PFDE 0x0008 /* Port F DMA Request Enable */
-#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
-#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
-
-#define PFTE 0x0010 /* Port F Timer Enable */
-#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
-#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
-
-#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
-#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
-#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
-
-#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
-#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
-#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
-
-#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
-#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
-#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
-
-#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
-#define PFFE_TIMER 0x0000 /* Enable TMR2 */
-#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
-
-#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
-#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
-#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
-
-#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
-#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
-#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
-
-#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
-#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
-#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
-
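Each function-enable field above pairs a mask with named values, and PJCE() is the only multi-bit one, so updates should be read-modify-write. A sketch assuming the bfin_read_PORT_MUX()/bfin_write_PORT_MUX() accessors:

	/* illustrative: route port J to the CAN RX/TX function */
	bfin_write_PORT_MUX((bfin_read_PORT_MUX() & ~PJCE(3)) | PJCE_CAN);
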
-
-/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
-/* HDMAx_CTL Masks */
-#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
-#define REP 0x0002 /* HDMA Request Polarity */
-#define UTE 0x0004 /* Urgency Threshold Enable */
-#define OIE 0x0010 /* Overflow Interrupt Enable */
-#define BDIE 0x0020 /* Block Done Interrupt Enable */
-#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
-#define DRQ 0x0300 /* HDMA Request Type */
-#define DRQ_NONE 0x0000 /* No Request */
-#define DRQ_SINGLE 0x0100 /* Channels Request Single */
-#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
-#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
-#define RBC 0x1000 /* Reload BCNT With IBCNT */
-#define PS 0x2000 /* HDMA Pin Status */
-#define OI 0x4000 /* Overflow Interrupt Generated */
-#define BDI 0x8000 /* Block Done Interrupt Generated */
-
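A minimal sketch of composing these bits (the HMDMA0_CONTROL register name is an assumption based on the HMDMAEN comment; this patch only defines the masks):

	/* illustrative: enable handshake DMA 0, default multi request, block-done IRQ */
	bfin_write16(HMDMA0_CONTROL, HMDMAEN | DRQ_MULTI | BDIE);
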
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
-
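These are code entry points rather than data, so a caller reaches them through a function-pointer cast. A hedged sketch (the void/void signature is illustrative; the real entries take arguments described in the hardware reference manual):

	void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
	bootrom_reset();	/* does not return */
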
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define PGDE_UART PFDE_UART
-#define PGDE_DMA PFDE_DMA
-#define CKELOW SCKELOW
-
-/* HOST Port Registers */
-
-#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
-#define HOST_STATUS 0xffc03404 /* HOST Status Register */
-#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define CNT_CONFIG 0xffc03500 /* Configuration Register */
-#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
-#define CNT_STATUS 0xffc03508 /* Status Register */
-#define CNT_COMMAND 0xffc0350c /* Command Register */
-#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
-#define CNT_COUNTER 0xffc03514 /* Counter Register */
-#define CNT_MAX 0xffc03518 /* Maximum Count Register */
-#define CNT_MIN 0xffc0351c /* Minimum Count Register */
-
-/* OTP/FUSE Registers */
-
-#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
-#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
-#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
-#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
-#define SECURE_CONTROL 0xffc03624 /* Secure Control */
-#define SECURE_STATUS 0xffc03628 /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
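Since OTP_DATA0-3 window the same 128-bit fuse read/write buffer, reading the whole buffer is four consecutive 32-bit accesses. A sketch using the bfin_read32() helper already used elsewhere in this tree:

	u32 fuse[4];
	int i;

	for (i = 0; i < 4; i++)
		fuse[i] = bfin_read32(OTP_DATA0 + i * 4);
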
-/* Motor Control PWM Registers */
-
-#define PWM_CTRL 0xffc03700 /* PWM Control Register */
-#define PWM_STAT 0xffc03704 /* PWM Status Register */
-#define PWM_TM 0xffc03708 /* PWM Period Register */
-#define PWM_DT 0xffc0370c /* PWM Dead Time Register */
-#define PWM_GATE 0xffc03710 /* PWM Chopping Control */
-#define PWM_CHA 0xffc03714 /* PWM Channel A Duty Control */
-#define PWM_CHB 0xffc03718 /* PWM Channel B Duty Control */
-#define PWM_CHC 0xffc0371c /* PWM Channel C Duty Control */
-#define PWM_SEG 0xffc03720 /* PWM Crossover and Output Enable */
-#define PWM_SYNCWT 0xffc03724 /* PWM Sync Pulse Width Control */
-#define PWM_CHAL 0xffc03728 /* PWM Channel AL Duty Control (SR mode only) */
-#define PWM_CHBL 0xffc0372c /* PWM Channel BL Duty Control (SR mode only) */
-#define PWM_CHCL 0xffc03730 /* PWM Channel CL Duty Control (SR mode only) */
-#define PWM_LSI 0xffc03734 /* PWM Low Side Invert (SR mode only) */
-#define PWM_STAT2 0xffc03738 /* PWM Status Register 2 */
-
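A hedged sketch of programming a duty cycle with these registers, using the raw bfin_write16() helper since only addresses are defined here (the values, and the omission of any enable sequence, are illustrative):

	/* illustrative: period of 1000 counts, channel A at 25% duty */
	bfin_write16(PWM_TM, 1000);
	bfin_write16(PWM_CHA, 250);
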
-
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and its negated form) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
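The 'n' variants below are all-zero placeholders so a full register value can be composed with every field named explicitly, whether set or cleared. For example (writing via bfin_write16() is an assumption of this sketch):

	/* illustrative: host writes enabled, host reads explicitly disabled */
	bfin_write16(HOST_CONTROL, HOST_CNTR_EHW | HOST_CNTR_nEHR);
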
-/* Bit masks for HOST_CONTROL */
-
-#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
-#define HOST_CNTR_nHOST_EN 0x0
-#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
-#define HOST_CNTR_nHOST_END 0x0
-#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
-#define HOST_CNTR_nDATA_SIZE 0x0
-#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
-#define HOST_CNTR_nHOST_RST 0x0
-#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
-#define HOST_CNTR_nHRDY_OVR 0x0
-#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
-#define HOST_CNTR_nINT_MODE 0x0
-#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
-#define HOST_CNTR_nBT_EN 0x0
-#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
-#define HOST_CNTR_nEHW 0x0
-#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
-#define HOST_CNTR_nEHR 0x0
-#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
-#define HOST_CNTR_nBDR 0x0
-
-/* Bit masks for HOST_STATUS */
-
-#define HOST_STAT_READY 0x1 /* DMA Ready */
-#define HOST_STAT_nREADY 0x0
-#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
-#define HOST_STAT_nFIFOFULL 0x0
-#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
-#define HOST_STAT_nFIFOEMPTY 0x0
-#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
-#define HOST_STAT_nCOMPLETE 0x0
-#define HOST_STAT_HSHK 0x10 /* Host Handshake */
-#define HOST_STAT_nHSHK 0x0
-#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
-#define HOST_STAT_nTIMEOUT 0x0
-#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
-#define HOST_STAT_nHIRQ 0x0
-#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
-#define HOST_STAT_nALLOW_CNFG 0x0
-#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
-#define HOST_STAT_nDMA_DIR 0x0
-#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
-#define HOST_STAT_nBTE 0x0
-#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
-#define HOST_STAT_nHOSTRD_DONE 0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout Count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define EMUDABL 0x1 /* Emulation Disable */
-#define nEMUDABL 0x0
-#define RSTDABL 0x2 /* Reset Disable */
-#define nRSTDABL 0x0
-#define L1IDABL 0x1c /* L1 Instruction Memory Disable */
-#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable */
-#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable */
-#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
-#define nDMA0OVR 0x0
-#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
-#define nDMA1OVR 0x0
-#define EMUOVR 0x4000 /* Emulation Override */
-#define nEMUOVR 0x0
-#define OTPSEN 0x8000 /* OTP Secrets Enable */
-#define nOTPSEN 0x0
-#define L2DABL 0x70000 /* L2 Memory Disable */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define SECURE0 0x1 /* SECURE 0 */
-#define nSECURE0 0x0
-#define SECURE1 0x2 /* SECURE 1 */
-#define nSECURE1 0x0
-#define SECURE2 0x4 /* SECURE 2 */
-#define nSECURE2 0x0
-#define SECURE3 0x8 /* SECURE 3 */
-#define nSECURE3 0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define SECMODE 0x3 /* Secured Mode Control State */
-#define NMI 0x4 /* Non Maskable Interrupt */
-#define nNMI 0x0
-#define AFVALID 0x8 /* Authentication Firmware Valid */
-#define nAFVALID 0x0
-#define AFEXIT 0x10 /* Authentication Firmware Exit */
-#define nAFEXIT 0x0
-#define SECSTAT 0xe0 /* Secure Status */
-
-
-
-#endif /* _DEF_BF51X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/gpio.h b/arch/blackfin/mach-bf518/include/mach/gpio.h
index 9af6ce0f632..b480705bfc2 100644
--- a/arch/blackfin/mach-bf518/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf518/include/mach/gpio.h
@@ -55,4 +55,8 @@
#define PORT_G GPIO_PG0
#define PORT_H GPIO_PH0
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/pll.h b/arch/blackfin/mach-bf518/include/mach/pll.h
index d5502988896..94cca674d83 100644
--- a/arch/blackfin/mach-bf518/include/mach/pll.h
+++ b/arch/blackfin/mach-bf518/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
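The helpers deleted above live on in mach-common/pll.h with the same contract: any write to PLL_CTL or VR_CTL starts a relock, during which the helper idles the core with only the PLL wakeup unmasked. Callers keep the usual read-modify-write shape; a sketch (the BYPASS bit name is an assumption about the PLL_CTL layout):

	/* illustrative: enter PLL bypass; the helper handles the relock idle */
	bfin_write_PLL_CTL(bfin_read_PLL_CTL() | BYPASS);
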
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 52295fff557..ccab4c689dc 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -67,6 +67,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -419,7 +420,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -474,7 +475,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -627,9 +628,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -661,9 +662,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 50533edc399..c9d6dc88f0e 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -104,6 +104,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -614,7 +615,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -669,7 +670,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -801,9 +802,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -835,9 +836,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index d06177b5fe2..b7101aa6e3a 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -68,6 +68,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -499,7 +500,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -554,7 +555,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -681,9 +682,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -715,9 +716,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 35a88a5a501..2cd2ff6f304 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -108,6 +108,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -708,7 +709,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -763,7 +764,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -962,6 +963,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
I2C_BOARD_INFO("ad5252", 0x2f),
},
#endif
+#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
+ {
+ I2C_BOARD_INFO("adau1373", 0x1A),
+ },
+#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -984,9 +990,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -1018,9 +1024,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 130861bd258..18d303dd562 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -193,7 +193,7 @@ static unsigned gpio_addr_inputs[] = {
GPIO_PG1, GPIO_PH9, GPIO_PH10
};
-static struct gpio_decoder_platfrom_data spi_decoded_cs = {
+static struct gpio_decoder_platform_data spi_decoded_cs = {
.base = EXP_GPIO_SPISEL_BASE,
.input_addrs = gpio_addr_inputs,
.nr_input_addrs = ARRAY_SIZE(gpio_addr_inputs),
@@ -586,7 +586,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -642,7 +642,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -799,9 +799,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -834,9 +834,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/dma.c b/arch/blackfin/mach-bf527/dma.c
index 7bc7577d6c4..1fabdefea73 100644
--- a/arch/blackfin/mach-bf527/dma.c
+++ b/arch/blackfin/mach-bf527/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..00c603fe821
--- /dev/null
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 2
+
+#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
index c1d55b878b4..960e08919de 100644
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
# define CONFIG_SERIAL_BFIN_CTSRTS
@@ -54,50 +27,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration differs from the schematic
- */
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- struct timer_list cts_timer;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -146,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index f714c5de307..e1d27927448 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -1,49 +1,37 @@
/*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _MACH_BLACKFIN_H_
#define _MACH_BLACKFIN_H_
#include "bf527.h"
-#include "defBF522.h"
#include "anomaly.h"
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "defBF527.h"
+#include <asm/def_LPBlackfin.h>
+#if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+# include "defBF522.h"
#endif
-
#if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "defBF525.h"
+# include "defBF525.h"
#endif
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF522.h"
-
#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "cdefBF527.h"
+# include "defBF527.h"
#endif
-#if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "cdefBF525.h"
-#endif
+#if !defined(__ASSEMBLY__)
+# include <asm/cdef_LPBlackfin.h>
+# if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+# include "cdefBF522.h"
+# endif
+# if defined(CONFIG_BF525) || defined(CONFIG_BF524)
+# include "cdefBF525.h"
+# endif
+# if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+# include "cdefBF527.h"
+# endif
#endif
-#define BFIN_UART_NR_PORTS 2
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
-
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 1079af8c7ae..618dfcdfa91 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1,21 +1,1095 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _CDEF_BF522_H
#define _CDEF_BF522_H
-/* include all Core registers and bit definitions */
-#include "defBF522.h"
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID() bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define bfin_read_SWRST() bfin_read16(SWRST)
+#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
+#define bfin_read_SYSCR() bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
-/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "cdefBF52x_base.h"
+#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
+
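The (x << 6) in the indexed forms above encodes the 0x40-byte stride between the controller's register banks, so one macro reaches both SIC_IMASK0 and SIC_IMASK1. A sketch (the bank index and bit position are arbitrary):

	/* illustrative: unmask interrupt source 5 in bank 1 */
	bfin_write_SIC_IMASK(1, bfin_read_SIC_IMASK(1) | (1 << 5));
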
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
+#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
+
+
+/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
+#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL)
+#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL, val)
+#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG)
+#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG, val)
+#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT)
+#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT, val)
+#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR)
+#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR, val)
+#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR)
+#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR, val)
+#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD)
+#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD, val)
+#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW)
+#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW, val)
+
+
+/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
+#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
+#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers */
+#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller */
+#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
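+/*
+ * Illustrative sketch, not part of the original register set: the
+ * DMA1..DMA11 blocks above repeat one register file per channel, so
+ * generic code can compute a channel's MMR address instead of switching
+ * over per-channel macros.  The 0x40-byte per-channel stride is an
+ * assumption about the BF52x memory map, not something this header states.
+ */
+#define DMA_CH_STRIDE_SKETCH 0x40
+#define bfin_read_DMAx_CONFIG_sketch(ch) \
+	bfin_read16(DMA1_CONFIG + ((ch) - 1) * DMA_CH_STRIDE_SKETCH)
+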
+#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
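+/*
+ * Illustrative sketch, not a definition from this header: a memory-to-
+ * memory transfer programs the MDMA_S0 (source) and MDMA_D0 (destination)
+ * halves as a pair, and completion is reported on the destination side.
+ * The DMAEN (0x1) and WNR (0x2) bit values in DMA_CONFIG are assumptions
+ * about the usual Blackfin layout.
+ */
+static inline void mdma0_memcpy_sketch(void *dst, const void *src,
+				       unsigned short nbytes)
+{
+	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
+	bfin_write_MDMA_S0_X_COUNT(nbytes);
+	bfin_write_MDMA_S0_X_MODIFY(1);		/* byte stride */
+	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
+	bfin_write_MDMA_D0_X_COUNT(nbytes);
+	bfin_write_MDMA_D0_X_MODIFY(1);
+	bfin_write_MDMA_S0_CONFIG(0x0001);	/* assumed DMAEN: source reads */
+	bfin_write_MDMA_D0_CONFIG(0x0003);	/* assumed DMAEN|WNR: dest writes */
+}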
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
+#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
+
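+/*
+ * Illustrative sketch, not part of the original header: PPI_STATUS holds
+ * sticky error flags, which is why bfin_clear_PPI_STATUS() above writes
+ * all ones (write-one-to-clear).  The PORT_EN bit value (0x1) used below
+ * is an assumption about the usual PPI_CONTROL layout.
+ */
+static inline void ppi_rx_start_sketch(unsigned short lines_per_frame)
+{
+	bfin_clear_PPI_STATUS();		/* drop stale error flags */
+	bfin_write_PPI_FRAME(lines_per_frame);	/* lines expected per frame */
+	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | 0x0001);
+}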
+
+/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
+#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
+
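+/*
+ * Illustrative sketch, not part of the original header: the _SET, _CLEAR
+ * and _TOGGLE views above let a driver flip individual port G pins without
+ * a read-modify-write of the data register, so concurrent writers to other
+ * pins cannot race with each other.
+ */
+static inline void portg_pulse_sketch(unsigned short pin_mask)
+{
+	bfin_write_PORTGIO_DIR(bfin_read_PORTGIO_DIR() | pin_mask);
+	bfin_write_PORTGIO_SET(pin_mask);	/* write-one-to-set */
+	bfin_write_PORTGIO_CLEAR(pin_mask);	/* write-one-to-clear */
+}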
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
+#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
+#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
+
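+/*
+ * Illustrative sketch, not part of the original header: DLL/DLH share
+ * addresses with THR/RBR/IER in the classic 16450 register map, gated by
+ * the DLAB bit in LCR.  DLAB as LCR bit 7 (0x80) and the divisor formula
+ * divisor = SCLK / (16 * baud) are assumptions from that convention.
+ */
+static inline void uart1_set_divisor_sketch(unsigned short divisor)
+{
+	unsigned short lcr = bfin_read_UART1_LCR();
+
+	bfin_write_UART1_LCR(lcr | 0x80);	/* DLAB: expose DLL/DLH */
+	bfin_write_UART1_DLL(divisor & 0xff);
+	bfin_write_UART1_DLH((divisor >> 8) & 0xff);
+	bfin_write_UART1_LCR(lcr);		/* restore, hide divisor latch */
+}
+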
+/* The CAN register set from cdefBF534.h is omitted here (CAN is not present in the ADSP-BF52x processors) */
+
+/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
+#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
+
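+/*
+ * Illustrative sketch, not part of the original header: routing a BF52x
+ * pin to a peripheral takes two steps -- pick the alternate function in
+ * PORT_MUX, then hand the pin over to the function layer through the
+ * port's FER register.  Both mask arguments are placeholders, not real
+ * bit assignments.
+ */
+static inline void portg_enable_periph_sketch(unsigned short fer_mask,
+					      unsigned short mux_bits)
+{
+	bfin_write_PORT_MUX(bfin_read_PORT_MUX() | mux_bits);
+	bfin_write_PORTG_FER(bfin_read_PORTG_FER() | fer_mask);
+}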
+
+/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
+#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO pin mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
+
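+/*
+ * Illustrative sketch, not part of the original header: CNT_COUNTER is the
+ * one 32-bit data register in this group, hence bfin_read32() above while
+ * the control registers stay 16-bit.  Treating CNT_STATUS as write-one-to-
+ * clear is an assumption about the rotary counter block.
+ */
+static inline long cnt_snapshot_sketch(void)
+{
+	long pos = (long)bfin_read_CNT_COUNTER();	/* signed quadrature count */
+
+	bfin_write_CNT_STATUS(bfin_read_CNT_STATUS());	/* ack pending events */
+	return pos;
+}
+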
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
+
+/* NFC Registers */
+
+#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
+#define bfin_write_NFC_CTL(val) bfin_write16(NFC_CTL, val)
+#define bfin_read_NFC_STAT() bfin_read16(NFC_STAT)
+#define bfin_write_NFC_STAT(val) bfin_write16(NFC_STAT, val)
+#define bfin_read_NFC_IRQSTAT() bfin_read16(NFC_IRQSTAT)
+#define bfin_write_NFC_IRQSTAT(val) bfin_write16(NFC_IRQSTAT, val)
+#define bfin_read_NFC_IRQMASK() bfin_read16(NFC_IRQMASK)
+#define bfin_write_NFC_IRQMASK(val) bfin_write16(NFC_IRQMASK, val)
+#define bfin_read_NFC_ECC0() bfin_read16(NFC_ECC0)
+#define bfin_write_NFC_ECC0(val) bfin_write16(NFC_ECC0, val)
+#define bfin_read_NFC_ECC1() bfin_read16(NFC_ECC1)
+#define bfin_write_NFC_ECC1(val) bfin_write16(NFC_ECC1, val)
+#define bfin_read_NFC_ECC2() bfin_read16(NFC_ECC2)
+#define bfin_write_NFC_ECC2(val) bfin_write16(NFC_ECC2, val)
+#define bfin_read_NFC_ECC3() bfin_read16(NFC_ECC3)
+#define bfin_write_NFC_ECC3(val) bfin_write16(NFC_ECC3, val)
+#define bfin_read_NFC_COUNT() bfin_read16(NFC_COUNT)
+#define bfin_write_NFC_COUNT(val) bfin_write16(NFC_COUNT, val)
+#define bfin_read_NFC_RST() bfin_read16(NFC_RST)
+#define bfin_write_NFC_RST(val) bfin_write16(NFC_RST, val)
+#define bfin_read_NFC_PGCTL() bfin_read16(NFC_PGCTL)
+#define bfin_write_NFC_PGCTL(val) bfin_write16(NFC_PGCTL, val)
+#define bfin_read_NFC_READ() bfin_read16(NFC_READ)
+#define bfin_write_NFC_READ(val) bfin_write16(NFC_READ, val)
+#define bfin_read_NFC_ADDR() bfin_read16(NFC_ADDR)
+#define bfin_write_NFC_ADDR(val) bfin_write16(NFC_ADDR, val)
+#define bfin_read_NFC_CMD() bfin_read16(NFC_CMD)
+#define bfin_write_NFC_CMD(val) bfin_write16(NFC_CMD, val)
+#define bfin_read_NFC_DATA_WR() bfin_read16(NFC_DATA_WR)
+#define bfin_write_NFC_DATA_WR(val) bfin_write16(NFC_DATA_WR, val)
+#define bfin_read_NFC_DATA_RD() bfin_read16(NFC_DATA_RD)
+#define bfin_write_NFC_DATA_RD(val) bfin_write16(NFC_DATA_RD, val)
#endif /* _CDEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index d7e2751c6bc..d90a85b6b6b 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -1,15 +1,12 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _CDEF_BF525_H
#define _CDEF_BF525_H
-/* include all Core registers and bit definitions */
-#include "defBF525.h"
-
/* BF525 is BF522 + USB */
#include "cdefBF522.h"
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
index c7ba544d50b..eb22f586610 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
@@ -1,15 +1,12 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _CDEF_BF527_H
#define _CDEF_BF527_H
-/* include all Core registers and bit definitions */
-#include "defBF527.h"
-
/* BF527 is BF525 + EMAC */
#include "cdefBF525.h"
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
deleted file mode 100644
index 3048b52bf46..00000000000
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF52x_base.h"
-
-/* Include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
-#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID() bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define bfin_read_SWRST() bfin_read16(SWRST)
-#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
-#define bfin_read_SYSCR() bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
-#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
-#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
-
-
-/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
-#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL)
-#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL, val)
-#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG)
-#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG, val)
-#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT)
-#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT, val)
-#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR)
-#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR, val)
-#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR)
-#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR, val)
-#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD)
-#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD, val)
-#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW)
-#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW, val)
-
-
-/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
-#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
-#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers */
-#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backward compatibility with existing code */
-#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT, val)
-
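The two aliases above keep the deprecated TCPER/TCCNT spellings compiling; both name pairs resolve to the same traffic-control MMRs, so new code should prefer the DMA_TC_* forms. A one-line illustration (the value written is made up):

	bfin_write_DMA_TC_PER(0x0100);	/* same MMR the deprecated bfin_write_DMA_TCPER() touches */
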
-/* DMA Controller */
-#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
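The four MDMA register sets above pair a destination (D) and source (S) channel per stream; a memory-to-memory move programs both sides and then polls the destination's IRQ_STATUS. A minimal sketch, assuming the usual defBF534-style DMA bit names (DMAEN, WNR, WDSIZE_16, DMA_DONE), which this hunk does not itself define:

static void mdma0_copy16(void *dst, const void *src, unsigned int nwords)
{
	/* nwords must fit the 16-bit X_COUNT registers. */
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);	/* source side reads */
	bfin_write_MDMA_S0_X_COUNT(nwords);
	bfin_write_MDMA_S0_X_MODIFY(2);				/* 2-byte stride */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);	/* WNR marks the writing side */
	bfin_write_MDMA_D0_X_COUNT(nwords);
	bfin_write_MDMA_D0_X_MODIFY(2);

	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16);

	/* Busy-wait for completion, then clear the sticky done flag (W1C). */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);
}
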
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
-#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
-
-
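Note that bfin_clear_PPI_STATUS() above writes all-ones rather than zero, which implies the status bits are sticky write-one-to-clear flags. A sketch of the idiom in an error path (the helper is illustrative, not part of this patch):

static void ppi_ack_errors(void)
{
	u16 stat = bfin_read_PPI_STATUS();	/* snapshot the sticky flags */

	if (stat)
		bfin_clear_PPI_STATUS();	/* writes 0xFFFF: W1C every bit */
}
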
-/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
-#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
-#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
-#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
-
-/* The CAN register set from cdefBF534.h is omitted here (there is no CAN controller on the ADSP-BF52x processors) */
-
-/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
-#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
-#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-
-#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
-
-/* NFC Registers */
-
-#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
-#define bfin_write_NFC_CTL(val) bfin_write16(NFC_CTL, val)
-#define bfin_read_NFC_STAT() bfin_read16(NFC_STAT)
-#define bfin_write_NFC_STAT(val) bfin_write16(NFC_STAT, val)
-#define bfin_read_NFC_IRQSTAT() bfin_read16(NFC_IRQSTAT)
-#define bfin_write_NFC_IRQSTAT(val) bfin_write16(NFC_IRQSTAT, val)
-#define bfin_read_NFC_IRQMASK() bfin_read16(NFC_IRQMASK)
-#define bfin_write_NFC_IRQMASK(val) bfin_write16(NFC_IRQMASK, val)
-#define bfin_read_NFC_ECC0() bfin_read16(NFC_ECC0)
-#define bfin_write_NFC_ECC0(val) bfin_write16(NFC_ECC0, val)
-#define bfin_read_NFC_ECC1() bfin_read16(NFC_ECC1)
-#define bfin_write_NFC_ECC1(val) bfin_write16(NFC_ECC1, val)
-#define bfin_read_NFC_ECC2() bfin_read16(NFC_ECC2)
-#define bfin_write_NFC_ECC2(val) bfin_write16(NFC_ECC2, val)
-#define bfin_read_NFC_ECC3() bfin_read16(NFC_ECC3)
-#define bfin_write_NFC_ECC3(val) bfin_write16(NFC_ECC3, val)
-#define bfin_read_NFC_COUNT() bfin_read16(NFC_COUNT)
-#define bfin_write_NFC_COUNT(val) bfin_write16(NFC_COUNT, val)
-#define bfin_read_NFC_RST() bfin_read16(NFC_RST)
-#define bfin_write_NFC_RST(val) bfin_write16(NFC_RST, val)
-#define bfin_read_NFC_PGCTL() bfin_read16(NFC_PGCTL)
-#define bfin_write_NFC_PGCTL(val) bfin_write16(NFC_PGCTL, val)
-#define bfin_read_NFC_READ() bfin_read16(NFC_READ)
-#define bfin_write_NFC_READ(val) bfin_write16(NFC_READ, val)
-#define bfin_read_NFC_ADDR() bfin_read16(NFC_ADDR)
-#define bfin_write_NFC_ADDR(val) bfin_write16(NFC_ADDR, val)
-#define bfin_read_NFC_CMD() bfin_read16(NFC_CMD)
-#define bfin_write_NFC_CMD(val) bfin_write16(NFC_CMD, val)
-#define bfin_read_NFC_DATA_WR() bfin_read16(NFC_DATA_WR)
-#define bfin_write_NFC_DATA_WR(val) bfin_write16(NFC_DATA_WR, val)
-#define bfin_read_NFC_DATA_RD() bfin_read16(NFC_DATA_RD)
-#define bfin_write_NFC_DATA_RD(val) bfin_write16(NFC_DATA_RD, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
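A note on the pattern in the cdef block above: every MMR gets a width-matched bfin_read_*/bfin_write_* pair so callers never hand-roll a pointer cast at the wrong access size. The conventional read-modify-write idiom, sketched against two GPIO registers from this header (the pin number is illustrative only):

static inline void pf2_make_input(void)
{
	/* 16-bit MMRs, so these expand to bfin_read16()/bfin_write16(). */
	bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() & ~(1 << 2));	/* PF2 as input */
	bfin_write_PORTFIO_INEN(bfin_read_PORTFIO_INEN() | (1 << 2));	/* enable input buffer */
}
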
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index cb139a25481..89f5420ee6c 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,12 +7,1393 @@
#ifndef _DEF_BF522_H
#define _DEF_BF522_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x */
+/* ************************************************************** */
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* ==== begin from defBF534.h ==== */
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define PLL_CTL 0xFFC00000 /* PLL Control Register */
+#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
+#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
+#define PLL_STAT 0xFFC0000C /* PLL Status Register */
+#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
+#define CHIPID 0xFFC00014 /* Device ID Register */
+
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define SWRST 0xFFC00100 /* Software Reset Register */
+#define SYSCR 0xFFC00104 /* System Configuration Register */
+#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
+
+#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
+#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
+#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
+#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
+#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
+#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
+#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
+
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00164) */
+#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 */
+#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
+#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
+#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
+#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
+#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
+#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
+
+
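The *_1 bank exists because the BF52x has more than 32 system interrupt sources; SIC_IMASK1/SIC_ISR1/SIC_IWR1 simply continue the *_0 registers. A sketch of unmasking a source by system interrupt number, assuming the usual 32-bit bfin accessors (the helper itself is illustrative):

static void sic_unmask_sid(unsigned int sid)	/* sid: 0..63 */
{
	if (sid < 32)
		bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | (1u << sid));
	else
		bfin_write32(SIC_IMASK1, bfin_read32(SIC_IMASK1) | (1u << (sid - 32)));
}
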
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
+#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
+#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
+
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define RTC_STAT 0xFFC00300 /* RTC Status Register */
+#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
+#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
+#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
+#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
+#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
+#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
+
+
+/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
+#define UART0_THR 0xFFC00400 /* Transmit Holding register */
+#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
+#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
+#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
+#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
+#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
+#define UART0_LCR 0xFFC0040C /* Line Control Register */
+#define UART0_MCR 0xFFC00410 /* Modem Control Register */
+#define UART0_LSR 0xFFC00414 /* Line Status Register */
+#define UART0_MSR 0xFFC00418 /* Modem Status Register */
+#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
+#define UART0_GCTL 0xFFC00424 /* Global Control Register */
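+
+/*
+ * Usage sketch (illustrative, not part of the original register map):
+ * UART0_DLL/UART0_DLH share addresses with UART0_THR/UART0_RBR and
+ * UART0_IER, so the divisor latch is only reachable while the 16550-style
+ * DLAB bit of UART0_LCR (assumed here to be 0x80) is set.  "divisor" is a
+ * placeholder for the precomputed SCLK/(16*baudrate) value:
+ *
+ *	unsigned short lcr = *(volatile unsigned short *)UART0_LCR;
+ *	*(volatile unsigned short *)UART0_LCR = lcr | 0x80;	// set DLAB
+ *	*(volatile unsigned short *)UART0_DLL = divisor & 0xFF;
+ *	*(volatile unsigned short *)UART0_DLH = divisor >> 8;
+ *	*(volatile unsigned short *)UART0_LCR = lcr & ~0x80;	// clear DLAB
+ */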
+
+
+/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
+#define SPI0_REGBASE 0xFFC00500
+#define SPI_CTL 0xFFC00500 /* SPI Control Register */
+#define SPI_FLG 0xFFC00504 /* SPI Flag register */
+#define SPI_STAT 0xFFC00508 /* SPI Status register */
+#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
+#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
+#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */
+#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
+
+
+/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
+#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
+#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
+#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
+#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
+
+#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
+#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
+#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
+#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
+
+#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
+#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
+#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
+#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
+
+#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
+#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
+#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
+#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
+
+#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
+#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
+#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
+#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
+
+#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
+#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
+#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
+#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
+
+#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
+#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
+#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
+#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
+
+#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
+#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
+#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
+#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
+
+#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
+#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
+#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
+#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
+#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
+#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
+#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
+#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
+#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
+#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
+#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
+#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
+#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
+#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
+#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
+#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
+#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
+#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
+#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
+#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
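+
+/*
+ * Usage sketch (illustrative, not part of the original register map):
+ * driving PF0 as a GPIO output.  The bit positions are per-pin, so the
+ * same 0x0001 mask selects PF0 in every Port F register:
+ *
+ *	*(volatile unsigned short *)PORTFIO_DIR |= 0x0001;	// PF0 = output
+ *	*(volatile unsigned short *)PORTFIO |= 0x0001;		// drive PF0 high
+ *	*(volatile unsigned short *)PORTFIO_TOGGLE = 0x0001;	// flip PF0
+ *
+ * For an input, clear the PORTFIO_DIR bit and set the matching bit in
+ * PORTFIO_INEN so the pin level is visible in PORTFIO.
+ */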
+
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
+#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
+#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
+#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
+#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
+#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
+#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
+#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
+#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
+#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
+#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
+#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
+#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
+#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
+#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
+#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
+#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
+#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
+#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
+#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
+#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
+#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
+#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
+#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
+#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
+#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
+#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
+#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
+#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
+#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
+#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
+#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
+#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
+#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
+#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
+
+
+/* DMA Traffic Control Registers */
+#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
+#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
+#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
+#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
+#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
+#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
+#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
+#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
+#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
+#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
+#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
+#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
+#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
+#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
+
+#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
+#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
+#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
+#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
+#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
+#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
+#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
+#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
+#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
+#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
+#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
+#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
+#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
+
+#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
+#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
+#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
+#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
+#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
+#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
+#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
+#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
+#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
+#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
+#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
+#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
+#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
+
+#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
+#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
+#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
+#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
+#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
+#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
+#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
+#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
+#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
+#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
+#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
+#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
+#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
+
+#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
+#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
+#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
+#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
+#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
+#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
+#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
+#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
+#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
+#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
+#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
+#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
+#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
+
+#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
+#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
+#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
+#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
+#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
+#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
+#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
+#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
+#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
+#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
+#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
+#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
+#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
+
+#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
+#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
+#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
+#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
+#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
+#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
+#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
+#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
+#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
+#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
+#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
+#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
+#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
+
+#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
+#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
+#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
+#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
+#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
+#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
+#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
+#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
+#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
+#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
+#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
+#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
+#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
+
+#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
+#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
+#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
+#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
+#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
+#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
+#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
+#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
+#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
+#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
+#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
+#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
+#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
+
+#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
+#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
+#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
+#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
+#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
+#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
+#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
+#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
+#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
+#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
+#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
+#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
+#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
+
+#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
+#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
+#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
+#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
+#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
+#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
+#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
+#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
+#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
+#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
+#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
+#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
+#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
+
+#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
+#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
+#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
+#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
+#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
+#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
+#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
+#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
+#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
+#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
+#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
+#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
+#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
+
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
+#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
+#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
+#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
+#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
+#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
+#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
+#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
+#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
+
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
+#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
+#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
+#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
+#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
+#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
+#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
+#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
+#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
+
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
+#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
+#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
+#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
+#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
+#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
+#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
+#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
+#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
+
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
+#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
+#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
+#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
+#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
+#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
+#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
+#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
+#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
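+
+/*
+ * Usage sketch (illustrative, not part of the original register map): a
+ * one-shot 32-bit memory-to-memory copy on MemDMA stream 0.  The
+ * DMA_CONFIG bit names (DMAEN, WNR, WDSIZE_32) and the DMA_DONE status
+ * bit are assumed to come from the common Blackfin DMA definitions:
+ *
+ *	*(volatile unsigned long *)MDMA_S0_START_ADDR = (unsigned long)src;
+ *	*(volatile unsigned short *)MDMA_S0_X_COUNT = nwords;
+ *	*(volatile short *)MDMA_S0_X_MODIFY = 4;
+ *	*(volatile unsigned long *)MDMA_D0_START_ADDR = (unsigned long)dst;
+ *	*(volatile unsigned short *)MDMA_D0_X_COUNT = nwords;
+ *	*(volatile short *)MDMA_D0_X_MODIFY = 4;
+ *	*(volatile unsigned short *)MDMA_S0_CONFIG = WDSIZE_32 | DMAEN;
+ *	*(volatile unsigned short *)MDMA_D0_CONFIG = WDSIZE_32 | WNR | DMAEN;
+ *	// then poll DMA_DONE in MDMA_D0_IRQ_STATUS and write it back to clear
+ */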
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
+#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
+#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
+#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
+#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
+#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
+
+
+/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
+#define TWI0_REGBASE 0xFFC01400
+#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
+#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
+#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
+#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
+#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
+#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
+#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
+#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
+#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
+#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
+#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
+#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
+#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
+#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
+#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
+#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
+#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
+#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
+#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
+#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
+#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
+#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
+#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
+#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
+#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
+#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
+#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
+#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
+#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
+#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
+#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
+#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
+#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
+#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
+#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
+#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
+#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
+#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
+#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
+#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
+#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
+#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
+#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
+#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
+#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
+#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
+#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
+#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
+#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
+#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
+
+
+/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
+#define UART1_THR 0xFFC02000 /* Transmit Holding register */
+#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
+#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
+#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
+#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
+#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
+#define UART1_LCR 0xFFC0200C /* Line Control Register */
+#define UART1_MCR 0xFFC02010 /* Modem Control Register */
+#define UART1_LSR 0xFFC02014 /* Line Status Register */
+#define UART1_MSR 0xFFC02018 /* Modem Status Register */
+#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
+#define UART1_GCTL 0xFFC02024 /* Global Control Register */
+
+
+/* CAN register sets from defBF534.h omitted (the ADSP-BF52x has no CAN controller) */
+
+/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
+#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
+#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
+#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
+#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
+
+
+/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
+#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
+#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
+#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
+#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
+#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
+#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
+#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
+
+#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
+#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
+#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
+#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
+#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
+#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
+#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX 0xFFC03210 /* Port F mux control */
+#define PORTG_MUX 0xFFC03214 /* Port G mux control */
+#define PORTH_MUX 0xFFC03218 /* Port H mux control */
+#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
+#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
+#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
+#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
+#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
+#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
+#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
+#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer: All macros are intended to make C and Assembly code more readable.
+** Use these macros carefully, as any that do left shifts for field
+** depositing will result in the lower order bits being destroyed. Any
+** macro that shifts left to properly position the bit-field should be
+** used as part of an OR to initialize a register and NOT as a dynamic
+** modifier UNLESS the lower order bits are saved and ORed back in when
+** the macro is used.
+*************************************************************************************/
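+
+/*
+ * For example (illustrative only): a field-depositing macro such as
+ * P1_IVG() below zeroes the other seven nibbles if assigned directly to
+ * SIC_IAR0, so a dynamic change must save and restore the untouched bits:
+ *
+ *	unsigned long iar = *(volatile unsigned long *)SIC_IAR0;
+ *	iar = (iar & ~0x000000F0) | P1_IVG(8);	// retarget peripheral #1 only
+ *	*(volatile unsigned long *)SIC_IAR0 = iar;
+ */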
+
+/* CHIPID Masks */
+#define CHIPID_VERSION 0xF0000000
+#define CHIPID_FAMILY 0x0FFFF000
+#define CHIPID_MANUFACTURE 0x00000FFE
+
+/* SWRST Masks */
+#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
+#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
+#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
+#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
+#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
+
+/* SYSCR Masks */
+#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
+#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
+
+
+/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
+
+#if 0
+#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
+
+#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
+#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
+#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
+#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
+#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
+
+#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
+#define IRQ_TWI 0x00000200 /* TWI Interrupt */
+#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
+#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
+#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
+#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
+#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
+#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
+
+#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
+#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
+#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
+#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
+#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
+#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
+#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
+#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
+#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
+#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
+
+#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
+#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
+#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
+#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
+#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
+#define IRQ_DMA12 0x20000000 /* DMA Channel 12 (MDMA1 Source) RX Interrupt */
+#define IRQ_DMA13 0x20000000 /* DMA Channel 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14 0x40000000 /* DMA Channel 14 (MDMA0 Source) RX Interrupt */
+#define IRQ_DMA15 0x40000000 /* DMA Channel 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
+#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
+#endif
+
+/* SIC_IAR0 Macros */
+#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
+#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
+#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
+#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
+#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
+#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
+#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
+#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */
+
+/* SIC_IAR1 Macros */
+#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
+#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
+#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
+#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
+#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
+#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
+#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
+#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */
+
+/* SIC_IAR2 Macros */
+#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
+#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
+#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
+#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
+#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
+#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
+#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
+#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */
+
+/* SIC_IAR3 Macros */
+#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
+#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
+#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
+#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
+#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
+#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
+#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
+#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */
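+
+/*
+ * Usage sketch (illustrative, not part of the original register map): the
+ * Px_IVG() macros are intended to be ORed together to initialize a whole
+ * assignment register; the IVG numbers below are arbitrary placeholders
+ * (valid inputs are the general-purpose vectors IVG7..IVG15):
+ *
+ *	*(volatile unsigned long *)SIC_IAR0 =
+ *		P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(8) |
+ *		P4_IVG(9) | P5_IVG(9) | P6_IVG(10) | P7_IVG(10);
+ */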
+
+
+/* SIC_IMASK Masks */
+#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
+#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
+#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
+#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
+
+/* SIC_IWR Masks */
+#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
+#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
+#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
+#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
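+
+/*
+ * Usage sketch (illustrative, not part of the original register map):
+ * SIC_MASK(x)/IWR_ENABLE(x) build single-bit images and
+ * SIC_UNMASK(x)/IWR_DISABLE(x) their complements, so combine them with
+ * the current register value instead of assigning them directly:
+ *
+ *	unsigned long iwr = *(volatile unsigned long *)SIC_IWR0;
+ *	*(volatile unsigned long *)SIC_IWR0 = iwr | IWR_ENABLE(10);	// wake on peripheral #10
+ *	...
+ *	iwr = *(volatile unsigned long *)SIC_IWR0;
+ *	*(volatile unsigned long *)SIC_IWR0 = iwr & IWR_DISABLE(10);	// stop waking on #10
+ */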
+
+/* **************** GENERAL PURPOSE TIMER MASKS **********************/
+/* TIMER_ENABLE Masks */
+#define TIMEN0 0x0001 /* Enable Timer 0 */
+#define TIMEN1 0x0002 /* Enable Timer 1 */
+#define TIMEN2 0x0004 /* Enable Timer 2 */
+#define TIMEN3 0x0008 /* Enable Timer 3 */
+#define TIMEN4 0x0010 /* Enable Timer 4 */
+#define TIMEN5 0x0020 /* Enable Timer 5 */
+#define TIMEN6 0x0040 /* Enable Timer 6 */
+#define TIMEN7 0x0080 /* Enable Timer 7 */
+
+/* TIMER_DISABLE Masks */
+#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
+#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
+#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
+#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
+#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
+#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
+#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
+#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
+
+/* TIMER_STATUS Masks */
+#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
+#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
+#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
+#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
+#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
+#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
+#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
+#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
+#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
+#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
+#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
+#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
+#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
+#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
+#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
+#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
+#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
+#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
+#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
+#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
+#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
+#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
+#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
+#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks */
+#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
+#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
+#define EXT_CLK 0x0003 /* External Clock Mode */
+#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
+#define PERIOD_CNT 0x0008 /* Period Count */
+#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
+#define TIN_SEL 0x0020 /* Timer Input Select */
+#define OUT_DIS 0x0040 /* Output Pad Disable */
+#define CLK_SEL 0x0080 /* Timer Clock Select */
+#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
+#define EMU_RUN 0x0200 /* Emulation Behavior Select */
+#define ERR_TYP 0xC000 /* Error Type */
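+
+/*
+ * Usage sketch (illustrative, not part of the original register map): a
+ * simple PWM setup on timer 0 - configure, load period/width, then
+ * enable.  The period and width values are arbitrary placeholders in
+ * units of the selected timer clock:
+ *
+ *	*(volatile unsigned short *)TIMER0_CONFIG = PWM_OUT | PERIOD_CNT | PULSE_HI;
+ *	*(volatile unsigned long *)TIMER0_PERIOD = 1000;
+ *	*(volatile unsigned long *)TIMER0_WIDTH = 250;	// 25% duty cycle
+ *	*(volatile unsigned short *)TIMER_ENABLE = TIMEN0;
+ *
+ * Writing TIMDIS0 to TIMER_DISABLE stops the timer; TIMIL0 and TOVF_ERR0
+ * in TIMER_STATUS then report interrupt and overflow events.
+ */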
+
+/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
+/* EBIU_AMGCTL Masks */
+#define AMCKEN 0x0001 /* Enable CLKOUT */
+#define AMBEN_NONE 0x0000 /* All Banks Disabled */
+#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
+#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
+#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
+#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
+
+/* EBIU_AMBCTL0 Masks */
+#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
+#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
+#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
+#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
+#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
+#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
+#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
+#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
+#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
+#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
+#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
+#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
+#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
+#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
+#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
+#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
+#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
+#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
+#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
+#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
+#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
+#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
+#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
+#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
+#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
+#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
+#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
+#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
+#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
+#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
+#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
+#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
+#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
+#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
+#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
+#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
+
+#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
+#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
+#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
+#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
+#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
+#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
+#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
+#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
+#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
+#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
+#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
+#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
+#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
+#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
+#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
+#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
+#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
+#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
+#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
+#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
+#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
+#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
+#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
+#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
+#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
+#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
+#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
+#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
+#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
+#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
+#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
+#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
+#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
+#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
+#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
+#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
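+
+/*
+ * Usage sketch (illustrative, not part of the original register map):
+ * bank 0 and bank 1 fields share EBIU_AMBCTL0, so a typical setup ORs
+ * one value from each field group (timings below are arbitrary examples)
+ * and then enables the banks in EBIU_AMGCTL:
+ *
+ *	*(volatile unsigned long *)EBIU_AMBCTL0 =
+ *		B0RDYEN | B0TT_2 | B0ST_2 | B0HT_1 | B0RAT_8 | B0WAT_8 |
+ *		B1TT_1 | B1ST_1 | B1HT_1 | B1RAT_4 | B1WAT_4;
+ *	*(volatile unsigned short *)EBIU_AMGCTL = AMCKEN | AMBEN_B0_B1;
+ */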
+
+/* EBIU_AMBCTL1 Masks */
+#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
+#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
+#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
+#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
+#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
+#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
+#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
+#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
+#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
+#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
+#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
+#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
+#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
+#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
+#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
+#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
+#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
+#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
+#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
+#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
+#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
+#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
+#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
+#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
+#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
+#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
+#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
+#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
+#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
+#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
+#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
+#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
+#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
+#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
+#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
+#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
+
+#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
+#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
+#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
+#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
+#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
+#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
+#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
+#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
+#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
+#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
+#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
+#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
+#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
+#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
+#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
+#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
+#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
+#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
+#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
+#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
+#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
+#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
+#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
+#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
+#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
+#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
+#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
+#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
+#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
+#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
+#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
+#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
+#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
+#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
+#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
+#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
+#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
+#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
+#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
+#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
+#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
+#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
+#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
+#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
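+
+/* A minimal bank-3 timing sketch using the masks above (the
+ * EBIU_AMBCTL1 address and its bfin_write accessor are assumed from
+ * elsewhere in this header and the matching cdef header):
+ *
+ *   bfin_write_EBIU_AMBCTL1(B3RDYEN | B3TT_1 | B3ST_2 | B3HT_1
+ *                           | B3RAT_8 | B3WAT_8);
+ */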
+
+
+/* ********************** SDRAM CONTROLLER MASKS **********************************************/
+/* EBIU_SDGCTL Masks */
+#define SCTLE 0x00000001 /* Enable SDRAM Signals */
+#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
+#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
+#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
+#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
+#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
+#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
+#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
+#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
+#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
+#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
+#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
+#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
+#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
+#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
+#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
+#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
+#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
+#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
+#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
+#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
+#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
+#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
+#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
+#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
+#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
+#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
+#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
+#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
+#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
+#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
+#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
+#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
+#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
+#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
+#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
+#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
+#define EBUFE 0x02000000 /* Enable External Buffering Timing */
+#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
+#define EMREN 0x10000000 /* Extended Mode Register Enable */
+#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
+#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
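+
+/* A minimal SDRAM timing sketch using the masks above (the
+ * bfin_write_EBIU_SDGCTL() accessor is assumed from the matching
+ * cdef header):
+ *
+ *   bfin_write_EBIU_SDGCTL(SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3
+ *                          | TWR_2);
+ */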
+
+/* EBIU_SDBCTL Masks */
+#define EBE 0x0001 /* Enable SDRAM External Bank */
+#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
+#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
+#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
+#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
+#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
+#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
+#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
+#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
+#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
+#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
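+
+/* Sketch: a single enabled 64MB external bank with 10-bit column
+ * addressing (bfin_write_EBIU_SDBCTL() assumed from the cdef header):
+ *
+ *   bfin_write_EBIU_SDBCTL(EBE | EBSZ_64 | EBCAW_10);
+ */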
+
+/* EBIU_SDSTAT Masks */
+#define SDCI 0x0001 /* SDRAM Controller Idle */
+#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
+#define SDPUA 0x0004 /* SDRAM Power-Up Active */
+#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
+#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
+#define BGSTAT 0x0020 /* Bus Grant Status */
+
+
+/* ************************** DMA CONTROLLER MASKS ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
+#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
+#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
+#define PMAP_PPI 0x0000 /* PPI Port DMA */
+#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
+#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
+#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
+#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
+#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
+#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
+#define PMAP_SPI 0x7000 /* SPI Port DMA */
+#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
+#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
+#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
+#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
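+
+/* Read-side sketch: PMAP identifies the peripheral bound to a channel,
+ * e.g. (accessor and hypothetical handler names assumed):
+ *
+ *   if ((bfin_read_DMA1_PERIPHERAL_MAP() & PMAP) == PMAP_EMACRX)
+ *           handle_emac_rx();
+ */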
+
+/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/* PPI_CONTROL Masks */
+#define PORT_EN 0x0001 /* PPI Port Enable */
+#define PORT_DIR 0x0002 /* PPI Port Direction */
+#define XFR_TYPE 0x000C /* PPI Transfer Type */
+#define PORT_CFG 0x0030 /* PPI Port Configuration */
+#define FLD_SEL 0x0040 /* PPI Active Field Select */
+#define PACK_EN 0x0080 /* PPI Packing Mode */
+#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
+#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
+#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
+#define DLEN_8 0x0000 /* Data Length = 8 Bits */
+#define DLEN_10 0x0800 /* Data Length = 10 Bits */
+#define DLEN_11 0x1000 /* Data Length = 11 Bits */
+#define DLEN_12 0x1800 /* Data Length = 12 Bits */
+#define DLEN_13 0x2000 /* Data Length = 13 Bits */
+#define DLEN_14 0x2800 /* Data Length = 14 Bits */
+#define DLEN_15 0x3000 /* Data Length = 15 Bits */
+#define DLEN_16 0x3800 /* Data Length = 16 Bits */
+#define DLENGTH 0x3800 /* PPI Data Length */
+#define POLC 0x4000 /* PPI Clock Polarity */
+#define POLS 0x8000 /* PPI Frame Sync Polarity */
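+
+/* A minimal sketch enabling the port with byte packing at the default
+ * 8-bit data length (bfin_write_PPI_CONTROL() assumed from the cdef
+ * header):
+ *
+ *   bfin_write_PPI_CONTROL(PORT_EN | PACK_EN | DLEN_8);
+ */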
+
+/* PPI_STATUS Masks */
+#define FLD 0x0400 /* Field Indicator */
+#define FT_ERR 0x0800 /* Frame Track Error */
+#define OVR 0x1000 /* FIFO Overflow Error */
+#define UNDR 0x2000 /* FIFO Underrun Error */
+#define ERR_DET 0x4000 /* Error Detected Indicator */
+#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
+
+
+/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
+#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
+#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
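+
+/* Sketch: with the internal time reference prescaled to 10MHz (see
+ * PRESCALE below), one 100kHz SCL period spans 100 reference periods,
+ * so a symmetric duty cycle is programmed as:
+ *
+ *   *pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);
+ */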
+
+/* TWI_PRESCALE Masks */
+#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
+#define TWI_ENA 0x0080 /* TWI Enable */
+#define SCCB 0x0200 /* SCCB Compatibility Enable */
+
+/* TWI_SLAVE_CTL Masks */
+#define SEN 0x0001 /* Slave Enable */
+#define SADD_LEN 0x0002 /* Slave Address Length */
+#define STDVAL 0x0004 /* Slave Transmit Data Valid */
+#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
+#define GEN 0x0010 /* General Call Address Matching Enabled */
+
+/* TWI_SLAVE_STAT Masks */
+#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
+#define GCALL 0x0002 /* General Call Indicator */
+
+/* TWI_MASTER_CTL Masks */
+#define MEN 0x0001 /* Master Mode Enable */
+#define MADD_LEN 0x0002 /* Master Address Length */
+#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
+#define FAST 0x0008 /* Use Fast Mode Timing Specs */
+#define STOP 0x0010 /* Issue Stop Condition */
+#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
+#define DCNT 0x3FC0 /* Data Bytes To Transfer */
+#define SDAOVR 0x4000 /* Serial Data Override */
+#define SCLOVR 0x8000 /* Serial Clock Override */
+
+/* TWI_MASTER_STAT Masks */
+#define MPROG 0x0001 /* Master Transfer In Progress */
+#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
+#define ANAK 0x0004 /* Address Not Acknowledged */
+#define DNAK 0x0008 /* Data Not Acknowledged */
+#define BUFRDERR 0x0010 /* Buffer Read Error */
+#define BUFWRERR 0x0020 /* Buffer Write Error */
+#define SDASEN 0x0040 /* Serial Data Sense */
+#define SCLSEN 0x0080 /* Serial Clock Sense */
+#define BUSBUSY 0x0100 /* Bus Busy Indicator */
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
+#define SINIT 0x0001 /* Slave Transfer Initiated */
+#define SCOMP 0x0002 /* Slave Transfer Complete */
+#define SERR 0x0004 /* Slave Transfer Error */
+#define SOVF 0x0008 /* Slave Overflow */
+#define MCOMP 0x0010 /* Master Transfer Complete */
+#define MERR 0x0020 /* Master Transfer Error */
+#define XMTSERV 0x0040 /* Transmit FIFO Service */
+#define RCVSERV 0x0080 /* Receive FIFO Service */
+
+/* TWI_FIFO_CTRL Masks */
+#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
+#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
+#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
+#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
+
+/* TWI_FIFO_STAT Masks */
+#define XMTSTAT 0x0003 /* Transmit FIFO Status */
+#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
+#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
+#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
+
+#define RCVSTAT 0x000C /* Receive FIFO Status */
+#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
+#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
+#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
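+
+/* Poll sketch: wait until both receive bytes are valid before a
+ * double-byte read (register accessors assumed from the cdef header):
+ *
+ *   while ((bfin_read_TWI0_FIFO_STAT() & RCVSTAT) != RCV_FULL)
+ *           continue;
+ *   data = bfin_read_TWI0_RCV_DATA16();
+ */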
+
+
+/* Omit CAN masks from defBF534.h */
+
+/* ******************* PIN CONTROL REGISTER MASKS ************************/
+/* PORT_MUX Masks */
+#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
+#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
+#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
+
+#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
+#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
+#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
+#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
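+
+/* Note: the values above are PJCE(x) instances, e.g. PJCE_CAN ==
+ * PJCE(1) and PJCE_SPI == PJCE(2), so either form may be used when
+ * composing a BFIN_PORT_MUX value. */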
+
+#define PFDE 0x0008 /* Port F DMA Request Enable */
+#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
+#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
+
+#define PFTE 0x0010 /* Port F Timer Enable */
+#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
+#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
+
+#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
+#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
+#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
+
+#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
+#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
+#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
+
+#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
+#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
+#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
+
+#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
+#define PFFE_TIMER 0x0000 /* Enable TMR2 */
+#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
+
+#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
+#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
+#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
+
+#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
+#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
+#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
+
+#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
+#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
+#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
+
+
+/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
+/* HDMAx_CTL Masks */
+#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
+#define REP 0x0002 /* HDMA Request Polarity */
+#define UTE 0x0004 /* Urgency Threshold Enable */
+#define OIE 0x0010 /* Overflow Interrupt Enable */
+#define BDIE 0x0020 /* Block Done Interrupt Enable */
+#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
+#define DRQ 0x0300 /* HDMA Request Type */
+#define DRQ_NONE 0x0000 /* No Request */
+#define DRQ_SINGLE 0x0100 /* Channels Request Single */
+#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
+#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
+#define RBC 0x1000 /* Reload BCNT With IBCNT */
+#define PS 0x2000 /* HDMA Pin Status */
+#define OI 0x4000 /* Overflow Interrupt Generated */
+#define BDI 0x8000 /* Block Done Interrupt Generated */
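+
+/* Sketch: enable handshake DMA 0 with multi requests (the default)
+ * and a block-done interrupt (bfin_write_HMDMA0_CONTROL() assumed
+ * from the cdef header):
+ *
+ *   bfin_write_HMDMA0_CONTROL(HMDMAEN | DRQ_MULTI | BDIE);
+ */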
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
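+
+/* These are code entry points rather than data, so a caller invokes
+ * them through a function-pointer cast, e.g. a minimal reset sketch:
+ *
+ *   ((void (*)(void))_BOOTROM_RESET)();
+ */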
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define PGDE_UART PFDE_UART
+#define PGDE_DMA PFDE_DMA
+#define CKELOW SCKELOW
+
+/* ==== end from defBF534.h ==== */
+
+/* HOST Port Registers */
+
+#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
+#define HOST_STATUS 0xffc03404 /* HOST Status Register */
+#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define CNT_CONFIG 0xffc03500 /* Configuration Register */
+#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
+#define CNT_STATUS 0xffc03508 /* Status Register */
+#define CNT_COMMAND 0xffc0350c /* Command Register */
+#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
+#define CNT_COUNTER 0xffc03514 /* Counter Register */
+#define CNT_MAX 0xffc03518 /* Maximum Count Register */
+#define CNT_MIN 0xffc0351c /* Minimum Count Register */
+
+/* OTP/FUSE Registers */
+
+#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
+#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
+#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
+#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
+#define SECURE_CONTROL 0xffc03624 /* Secure Control */
+#define SECURE_STATUS 0xffc03628 /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read/write buffer */
+#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read/write buffer */
+#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read/write buffer */
+#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read/write buffer */
+
+/* NFC Registers */
+
+#define NFC_CTL 0xffc03700 /* NAND Control Register */
+#define NFC_STAT 0xffc03704 /* NAND Status Register */
+#define NFC_IRQSTAT 0xffc03708 /* NAND Interrupt Status Register */
+#define NFC_IRQMASK 0xffc0370c /* NAND Interrupt Mask Register */
+#define NFC_ECC0 0xffc03710 /* NAND ECC Register 0 */
+#define NFC_ECC1 0xffc03714 /* NAND ECC Register 1 */
+#define NFC_ECC2 0xffc03718 /* NAND ECC Register 2 */
+#define NFC_ECC3 0xffc0371c /* NAND ECC Register 3 */
+#define NFC_COUNT 0xffc03720 /* NAND ECC Count Register */
+#define NFC_RST 0xffc03724 /* NAND ECC Reset Register */
+#define NFC_PGCTL 0xffc03728 /* NAND Page Control Register */
+#define NFC_READ 0xffc0372c /* NAND Read Data Register */
+#define NFC_ADDR 0xffc03740 /* NAND Address Register */
+#define NFC_CMD 0xffc03744 /* NAND Command Register */
+#define NFC_DATA_WR 0xffc03748 /* NAND Data Write Register */
+#define NFC_DATA_RD 0xffc0374c /* NAND Data Read Register */
+
+/* ********************************************************** */
+/* SINGLE BIT MACRO PAIRS (bit mask and its negation) */
+/* and MULTI BIT READ MACROS */
+/* ********************************************************** */
+
+/* Bit masks for HOST_CONTROL */
+
+#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
+#define HOST_CNTR_nHOST_EN 0x0
+#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
+#define HOST_CNTR_nHOST_END 0x0
+#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
+#define HOST_CNTR_nDATA_SIZE 0x0
+#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
+#define HOST_CNTR_nHOST_RST 0x0
+#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
+#define HOST_CNTR_nHRDY_OVR 0x0
+#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
+#define HOST_CNTR_nINT_MODE 0x0
+#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
+#define HOST_CNTR_nBT_EN 0x0
+#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
+#define HOST_CNTR_nEHW 0x0
+#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
+#define HOST_CNTR_nEHR 0x0
+#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
+#define HOST_CNTR_nBDR 0x0
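+
+/* Sketch: bring the host port up for transfers in both directions
+ * (bfin_write_HOST_CONTROL() assumed from the cdef header):
+ *
+ *   bfin_write_HOST_CONTROL(HOST_CNTR_HOST_EN | HOST_CNTR_EHW
+ *                           | HOST_CNTR_EHR);
+ */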
+
+/* Bit masks for HOST_STATUS */
+
+#define HOST_STAT_READY 0x1 /* DMA Ready */
+#define HOST_STAT_nREADY 0x0
+#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
+#define HOST_STAT_nFIFOFULL 0x0
+#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
+#define HOST_STAT_nFIFOEMPTY 0x0
+#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
+#define HOST_STAT_nCOMPLETE 0x0
+#define HOST_STAT_HSHK 0x10 /* Host Handshake */
+#define HOST_STAT_nHSHK 0x0
+#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
+#define HOST_STAT_nTIMEOUT 0x0
+#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
+#define HOST_STAT_nHIRQ 0x0
+#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
+#define HOST_STAT_nALLOW_CNFG 0x0
+#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
+#define HOST_STAT_nDMA_DIR 0x0
+#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
+#define HOST_STAT_nBTE 0x0
+#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
+#define HOST_STAT_nHOSTRD_DONE 0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define EMUDABL 0x1 /* Emulation Disable */
+#define nEMUDABL 0x0
+#define RSTDABL 0x2 /* Reset Disable */
+#define nRSTDABL 0x0
+#define L1IDABL 0x1c /* L1 Instruction Memory Disable */
+#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable */
+#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable */
+#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
+#define nDMA0OVR 0x0
+#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
+#define nDMA1OVR 0x0
+#define EMUOVR 0x4000 /* Emulation Override */
+#define nEMUOVR 0x0
+#define OTPSEN 0x8000 /* OTP Secrets Enable */
+#define nOTPSEN 0x0
+#define L2DABL 0x70000 /* L2 Memory Disable */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define SECURE0 0x1 /* SECURE 0 */
+#define nSECURE0 0x0
+#define SECURE1 0x2 /* SECURE 1 */
+#define nSECURE1 0x0
+#define SECURE2 0x4 /* SECURE 2 */
+#define nSECURE2 0x0
+#define SECURE3 0x8 /* SECURE 3 */
+#define nSECURE3 0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define SECMODE 0x3 /* Secured Mode Control State */
+#define NMI 0x4 /* Non Maskable Interrupt */
+#define nNMI 0x0
+#define AFVALID 0x8 /* Authentication Firmware Valid */
+#define nAFVALID 0x0
+#define AFEXIT 0x10 /* Authentication Firmware Exit */
+#define nAFEXIT 0x0
+#define SECSTAT 0xe0 /* Secure Status */
#endif /* _DEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index c136f703296..cc383adfdff 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 4dd58fb3315..05369a92fbc 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
deleted file mode 100644
index 09475034c6a..00000000000
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ /dev/null
@@ -1,1506 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF52X_H
-#define _DEF_BF52X_H
-
-
-/* ************************************************************** */
-/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x */
-/* ************************************************************** */
-
-/* ==== begin from defBF534.h ==== */
-
-/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
-#define PLL_CTL 0xFFC00000 /* PLL Control Register */
-#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
-#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
-#define PLL_STAT 0xFFC0000C /* PLL Status Register */
-#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
-#define CHIPID 0xFFC00014 /* Device ID Register */
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define SWRST 0xFFC00100 /* Software Reset Register */
-#define SYSCR 0xFFC00104 /* System Configuration Register */
-#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
-
-#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
-#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
-#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
-#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
-#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
-#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
-#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment register4 */
-#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment register5 */
-#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment register6 */
-#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment register7 */
-#define SIC_ISR1 0xFFC00160 /* Interrupt Status register */
-#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
-#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
-#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
-#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
-
-
-/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define RTC_STAT 0xFFC00300 /* RTC Status Register */
-#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
-#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
-#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
-#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
-#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
-#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
-
-
-/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
-#define UART0_THR 0xFFC00400 /* Transmit Holding register */
-#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
-#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
-#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
-#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
-#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
-#define UART0_LCR 0xFFC0040C /* Line Control Register */
-#define UART0_MCR 0xFFC00410 /* Modem Control Register */
-#define UART0_LSR 0xFFC00414 /* Line Status Register */
-#define UART0_MSR 0xFFC00418 /* Modem Status Register */
-#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
-#define UART0_GCTL 0xFFC00424 /* Global Control Register */
-
-
-/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
-#define SPI0_REGBASE 0xFFC00500
-#define SPI_CTL 0xFFC00500 /* SPI Control Register */
-#define SPI_FLG 0xFFC00504 /* SPI Flag register */
-#define SPI_STAT 0xFFC00508 /* SPI Status register */
-#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
-#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
-#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */
-#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
-
-
-/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
-#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
-#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
-#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
-#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
-
-#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
-#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
-#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
-#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
-
-#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
-#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
-#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
-#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
-
-#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
-#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
-#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
-#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
-
-#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
-#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
-#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
-#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
-
-#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
-#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
-#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
-#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
-
-#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
-#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
-#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
-#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
-
-#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
-#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
-#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
-#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
-
-#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
-#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
-#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
-#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
-#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
-#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
-#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
-#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
-#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
-#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
-#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
-#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
-#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
-#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
-#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
-#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
-#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
-#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
-#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
-#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
-
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
-#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
-#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
-#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
-#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
-#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
-#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
-#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
-#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
-#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
-#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
-#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
-#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
-#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
-#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
-#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
-#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
-#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
-#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
-#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
-#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
-#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
-#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
-#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
-#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
-#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
-#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
-#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
-#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
-#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
-#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
-#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
-#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
-#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
-#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
-#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
-#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
-#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
-#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
-#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
-#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
-#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
-#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
-#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
-#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
-#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
-#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
-
-
-/* DMA Traffic Control Registers */
-#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
-#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
-#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
-#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
-#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
-#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
-#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
-#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
-#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
-#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
-#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
-#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
-#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
-#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
-
-#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
-#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
-#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
-#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
-#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
-#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
-#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
-#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
-#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
-#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
-#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
-#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
-#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
-
-#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
-#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
-#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
-#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
-#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
-#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
-#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
-#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
-#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
-#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
-#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
-#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
-#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
-
-#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
-#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
-#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
-#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
-#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
-#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
-#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
-#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
-#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
-#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
-#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
-#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
-#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
-
-#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
-#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
-#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
-#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
-#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
-#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
-#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
-#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
-#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
-#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
-#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
-#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
-#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
-
-#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
-#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
-#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
-#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
-#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
-#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
-#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
-#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
-#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
-#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
-#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
-#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
-#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
-
-#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
-#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
-#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
-#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
-#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
-#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
-#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
-#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
-#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
-#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
-#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
-#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
-#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
-
-#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
-#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
-#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
-#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
-#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
-#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
-#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
-#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
-#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
-#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
-#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
-#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
-#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
-
-#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
-#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
-#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
-#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
-#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
-#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
-#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
-#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
-#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
-#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
-#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
-#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
-#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
-
-#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
-#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
-#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
-#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
-#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
-#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
-#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
-#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
-#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
-#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
-#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
-#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
-#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
-
-#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
-#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
-#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
-#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
-#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
-#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
-#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
-#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
-#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
-#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
-#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
-#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
-#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
-
-#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
-#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
-#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
-#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
-#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
-#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
-#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
-#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
-#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
-#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
-#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
-#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
-#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
-
-#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
-#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
-#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
-#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
-#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
-#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
-#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
-#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
-#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
-#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
-#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
-#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
-#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
-
-#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
-#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
-#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
-#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
-#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
-#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
-#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
-#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
-#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
-#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
-#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
-#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
-#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
-
-#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
-#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
-#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
-#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
-#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
-#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
-#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
-#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
-#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
-#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
-#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
-#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
-#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
-
-#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
-#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
-#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
-#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
-#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
-#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
-#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
-#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
-#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
-#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
-#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
-#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
-#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
-#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
-#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
-#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
-#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
-#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
-
-
-/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
-#define TWI0_REGBASE 0xFFC01400
-#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
-#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
-#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
-#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
-#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
-#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
-#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
-#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
-#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
-#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
-#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
-#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
-#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
-#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
-#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
-#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
-#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
-#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
-#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
-#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
-#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
-#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
-#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
-#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
-#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
-#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
-#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
-#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
-#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
-#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
-#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
-#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
-#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
-#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
-#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
-#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
-#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
-#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
-#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
-#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
-#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
-#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
-#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
-#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
-#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
-#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
-#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
-#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
-#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
-#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
-
-
-/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
-#define UART1_THR 0xFFC02000 /* Transmit Holding register */
-#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
-#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
-#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
-#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
-#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
-#define UART1_LCR 0xFFC0200C /* Line Control Register */
-#define UART1_MCR 0xFFC02010 /* Modem Control Register */
-#define UART1_LSR 0xFFC02014 /* Line Status Register */
-#define UART1_MSR 0xFFC02018 /* Modem Status Register */
-#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
-#define UART1_GCTL 0xFFC02024 /* Global Control Register */
-
-
-/* Omit CAN register sets from the defBF534.h (CAN is not in the ADSP-BF52x processor) */
-
-/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
-#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
-#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
-#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
-#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
-
-
-/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
-#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
-#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
-#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
-#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
-#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
-#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
-#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
-
-#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
-#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
-#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
-#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
-#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
-#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
-#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-#define PORTF_MUX 0xFFC03210 /* Port F mux control */
-#define PORTG_MUX 0xFFC03214 /* Port G mux control */
-#define PORTH_MUX 0xFFC03218 /* Port H mux control */
-#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
-#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
-#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
-#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
-#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
-#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
-#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
-#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer: All macros are intended to make C and Assembly code more readable.
-** Use these macros carefully, as any that do left shifts for field
-** depositing will result in the lower order bits being destroyed. Any
-** macro that shifts left to properly position the bit-field should be
-** used as part of an OR to initialize a register and NOT as a dynamic
-** modifier UNLESS the lower order bits are saved and ORed back in when
-** the macro is used.
-*************************************************************************************/
-
-/* CHIPID Masks */
-#define CHIPID_VERSION 0xF0000000
-#define CHIPID_FAMILY 0x0FFFF000
-#define CHIPID_MANUFACTURE 0x00000FFE
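
/*
 * Illustrative sketch (not part of the original header): extracting the
 * silicon revision with the masks above.  The CHIPID register address and
 * the bfin_read32() accessor are assumed from earlier in this header set.
 */
static inline unsigned int bf52x_chip_version(void)
{
	/* CHIPID_VERSION occupies the top nibble, so shift it down */
	return (bfin_read32(CHIPID) & CHIPID_VERSION) >> 28;
}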
-
-/* SWRST Masks */
-#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
-#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
-#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
-#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
-#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
-
-/* SYSCR Masks */
-#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
-#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
-
-
-/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
-
-#if 0
-#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
-
-#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
-#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
-#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
-#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
-#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
-
-#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
-#define IRQ_TWI 0x00000200 /* TWI Interrupt */
-#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
-#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
-#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
-#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
-#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
-#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
-
-#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
-#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
-#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
-#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
-#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
-#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
-#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
-#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
-#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
-#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
-
-#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
-#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
-#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
-#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
-#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
-#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
-#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
-#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
-#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
-#endif
-
-/* SIC_IAR0 Macros */
-#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
-#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
-#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
-#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
-#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
-#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
-#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
-#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */
-
-/* SIC_IAR1 Macros */
-#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
-#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
-#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
-#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
-#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
-#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
-#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
-#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */
-
-/* SIC_IAR2 Macros */
-#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
-#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
-#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
-#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
-#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
-#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
-#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
-#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */
-
-/* SIC_IAR3 Macros */
-#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
-#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
-#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
-#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
-#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
-#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
-#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
-#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */
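
/*
 * Hypothetical usage, tying the disclaimer above to these macros: the
 * P*_IVG() helpers shift into their fields, so compose a complete SIC_IAR0
 * value once at init time rather than OR-ing into a live register.  The
 * SIC_IAR0 register address is assumed from this part's MMR map.
 */
bfin_write32(SIC_IAR0, P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(8) |
		       P4_IVG(9) | P5_IVG(9) | P6_IVG(10) | P7_IVG(10));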
-
-
-/* SIC_IMASK Masks */
-#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
-#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
-#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
-#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
-
-/* SIC_IWR Masks */
-#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
-#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
-#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
-#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
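
/*
 * Sketch of the intended read-modify-write pattern (peripheral #10 is an
 * arbitrary example; the 0-suffixed register names match the SIC_IWR0
 * usage in the pll.h code removed later in this patch):
 */
bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(10));  /* mask it */
bfin_write32(SIC_IWR0, bfin_read32(SIC_IWR0) & IWR_DISABLE(10));   /* no wakeup */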
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks */
-#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
-#define STB 0x04 /* Stop Bits */
-#define PEN 0x08 /* Parity Enable */
-#define EPS 0x10 /* Even Parity Select */
-#define STP 0x20 /* Stick Parity */
-#define SB 0x40 /* Set Break */
-#define DLAB 0x80 /* Divisor Latch Access */
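
/*
 * A minimal 8N1 setup sketch using these masks; the divisor value is an
 * assumed example (SCLK / (16 * baud)), not taken from the original text.
 */
unsigned short divisor = 26;
bfin_write16(UART1_LCR, WLS(8) | DLAB);	/* open the divisor latch */
bfin_write16(UART1_DLL, divisor & 0xFF);
bfin_write16(UART1_DLH, divisor >> 8);
bfin_write16(UART1_LCR, WLS(8));	/* close it: 8 data, 1 stop, no parity */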
-
-/* UARTx_MCR Mask */
-#define LOOP_ENA 0x10 /* Loopback Mode Enable */
-#define LOOP_ENA_P 0x04
-
-/* UARTx_LSR Masks */
-#define DR 0x01 /* Data Ready */
-#define OE 0x02 /* Overrun Error */
-#define PE 0x04 /* Parity Error */
-#define FE 0x08 /* Framing Error */
-#define BI 0x10 /* Break Interrupt */
-#define THRE 0x20 /* THR Empty */
-#define TEMT 0x40 /* TSR and UART_THR Empty */
-
-/* UARTx_IER Masks */
-#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
-#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI 0x04 /* Enable RX Status Interrupt */
-
-/* UARTx_IIR Masks */
-#define NINT 0x01 /* Pending Interrupt */
-#define IIR_TX_READY 0x02 /* UART_THR empty */
-#define IIR_RX_READY 0x04 /* Receive data ready */
-#define IIR_LINE_CHANGE 0x06 /* Receive line status */
-#define IIR_STATUS 0x06 /* Highest Priority Pending Interrupt */
-
-/* UARTx_GCTL Masks */
-#define UCEN 0x01 /* Enable UARTx Clocks */
-#define IREN 0x02 /* Enable IrDA Mode */
-#define TPOLC 0x04 /* IrDA TX Polarity Change */
-#define RPOLC 0x08 /* IrDA RX Polarity Change */
-#define FPE 0x10 /* Force Parity Error On Transmit */
-#define FFE 0x20 /* Force Framing Error On Transmit */
-
-
-/* **************** GENERAL PURPOSE TIMER MASKS **********************/
-/* TIMER_ENABLE Masks */
-#define TIMEN0 0x0001 /* Enable Timer 0 */
-#define TIMEN1 0x0002 /* Enable Timer 1 */
-#define TIMEN2 0x0004 /* Enable Timer 2 */
-#define TIMEN3 0x0008 /* Enable Timer 3 */
-#define TIMEN4 0x0010 /* Enable Timer 4 */
-#define TIMEN5 0x0020 /* Enable Timer 5 */
-#define TIMEN6 0x0040 /* Enable Timer 6 */
-#define TIMEN7 0x0080 /* Enable Timer 7 */
-
-/* TIMER_DISABLE Masks */
-#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
-#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
-#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
-#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
-#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
-#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
-#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
-#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
-
-/* TIMER_STATUS Masks */
-#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
-#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
-#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
-#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
-#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
-#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
-#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
-#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
-#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
-#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
-#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
-#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
-#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
-#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
-#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
-#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
-#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
-#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
-#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
-#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
-#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
-#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
-#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
-#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks */
-#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
-#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
-#define EXT_CLK 0x0003 /* External Clock Mode */
-#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
-#define PERIOD_CNT 0x0008 /* Period Count */
-#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
-#define TIN_SEL 0x0020 /* Timer Input Select */
-#define OUT_DIS 0x0040 /* Output Pad Disable */
-#define CLK_SEL 0x0080 /* Timer Clock Select */
-#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
-#define EMU_RUN 0x0200 /* Emulation Behavior Select */
-#define ERR_TYP 0xC000 /* Error Type */
-
-
-/* ****************** GPIO PORTS F, G, H MASKS ***********************/
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
-/* Port F Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* Port G Masks */
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* Port H Masks */
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
-#define PH8 0x0100
-#define PH9 0x0200
-#define PH10 0x0400
-#define PH11 0x0800
-#define PH12 0x1000
-#define PH13 0x2000
-#define PH14 0x4000
-#define PH15 0x8000
-
-/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
-/* EBIU_AMGCTL Masks */
-#define AMCKEN 0x0001 /* Enable CLKOUT */
-#define AMBEN_NONE 0x0000 /* All Banks Disabled */
-#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
-#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
-#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
-#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
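
/*
 * One-line sketch per the OR-composition convention: enable CLKOUT plus
 * all four async banks (the EBIU_AMGCTL address is assumed from the MMR map).
 */
bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_ALL);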
-
-/* EBIU_AMBCTL0 Masks */
-#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
-#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
-#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
-#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
-#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
-#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
-#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
-#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
-#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
-#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
-#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
-#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
-#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
-#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
-#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
-#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
-#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
-#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
-#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
-#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
-#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
-#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
-#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
-#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
-#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
-#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
-#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
-#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
-#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
-#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
-#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
-#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
-#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
-#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
-#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
-#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
-
-#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
-#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
-#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
-#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
-#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
-#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
-#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
-#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
-#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
-#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
-#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
-#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
-#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
-#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
-#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
-#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
-#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
-#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
-#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
-#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
-#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
-#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
-#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
-#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
-#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
-#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
-#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
-#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
-#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
-#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
-#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
-#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
-#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
-#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
-#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
-#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
-
-/* EBIU_AMBCTL1 Masks */
-#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
-#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
-#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
-#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
-#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
-#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
-#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
-#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
-#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
-#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
-#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
-#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
-#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
-#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
-#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
-#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
-#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
-#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
-#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
-#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
-#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
-#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
-#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
-#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
-#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
-#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
-#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
-#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
-#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
-#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
-#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
-#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
-#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
-#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
-#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
-#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
-
-#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
-#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
-#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
-#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
-#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
-#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
-#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
-#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
-#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
-#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
-#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
-#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
-#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
-#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
-#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
-#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
-#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
-#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
-#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
-#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
-#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
-#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
-#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
-#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
-#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
-#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
-#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
-#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
-#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
-#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
-#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
-#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
-#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
-#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
-#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
-#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
-#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
-#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
-#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
-#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
-#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
-#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
-#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
-#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
-
-
-/* ********************** SDRAM CONTROLLER MASKS **********************************************/
-/* EBIU_SDGCTL Masks */
-#define SCTLE 0x00000001 /* Enable SDRAM Signals */
-#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
-#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
-#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
-#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
-#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
-#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
-#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
-#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
-#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
-#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
-#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
-#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
-#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
-#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
-#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
-#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
-#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
-#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
-#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
-#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
-#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
-#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
-#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
-#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
-#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
-#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
-#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
-#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
-#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
-#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
-#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
-#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
-#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
-#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
-#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
-#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
-#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
-#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
-#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
-#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
-#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
-#define EBUFE 0x02000000 /* Enable External Buffering Timing */
-#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
-#define EMREN 0x10000000 /* Extended Mode Register Enable */
-#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
-#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
-
-/* EBIU_SDBCTL Masks */
-#define EBE 0x0001 /* Enable SDRAM External Bank */
-#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
-#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
-#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
-#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
-#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
-#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
-#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
-#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
-#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
-#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
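
/*
 * Hypothetical single-bank SDRAM description: 64MB with a 10-bit column
 * address width (the EBIU_SDBCTL address is assumed from the MMR map).
 */
bfin_write16(EBIU_SDBCTL, EBE | EBSZ_64 | EBCAW_10);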
-
-/* EBIU_SDSTAT Masks */
-#define SDCI 0x0001 /* SDRAM Controller Idle */
-#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
-#define SDPUA 0x0004 /* SDRAM Power-Up Active */
-#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
-#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
-#define BGSTAT 0x0020 /* Bus Grant Status */
-
-
-/* ************************** DMA CONTROLLER MASKS ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
-#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
-#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
-#define PMAP_PPI 0x0000 /* PPI Port DMA */
-#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
-#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
-#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
-#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
-#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
-#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
-#define PMAP_SPI 0x7000 /* SPI Port DMA */
-#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
-#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
-#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
-#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
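
/*
 * Sketch: verifying a channel's routing before claiming it.  DMA channel 7
 * pairs with SPI per the IRQ table earlier; DMA7_PERIPHERAL_MAP and the
 * -EINVAL errno (from <linux/errno.h>) are assumptions for illustration.
 */
static int spi_dma_sanity_check(void)
{
	if ((bfin_read16(DMA7_PERIPHERAL_MAP) & PMAP) != PMAP_SPI)
		return -EINVAL;
	return 0;
}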
-
-/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/* PPI_CONTROL Masks */
-#define PORT_EN 0x0001 /* PPI Port Enable */
-#define PORT_DIR 0x0002 /* PPI Port Direction */
-#define XFR_TYPE 0x000C /* PPI Transfer Type */
-#define PORT_CFG 0x0030 /* PPI Port Configuration */
-#define FLD_SEL 0x0040 /* PPI Active Field Select */
-#define PACK_EN 0x0080 /* PPI Packing Mode */
-#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
-#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
-#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
-#define DLEN_8 0x0000 /* Data Length = 8 Bits */
-#define DLEN_10 0x0800 /* Data Length = 10 Bits */
-#define DLEN_11 0x1000 /* Data Length = 11 Bits */
-#define DLEN_12 0x1800 /* Data Length = 12 Bits */
-#define DLEN_13 0x2000 /* Data Length = 13 Bits */
-#define DLEN_14 0x2800 /* Data Length = 14 Bits */
-#define DLEN_15 0x3000 /* Data Length = 15 Bits */
-#define DLEN_16 0x3800 /* Data Length = 16 Bits */
-#define DLENGTH 0x3800 /* PPI Data Length */
-#define POLC 0x4000 /* PPI Clock Polarity */
-#define POLS 0x8000 /* PPI Frame Sync Polarity */
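
/*
 * Sketch: 8-bit receive-mode PPI with byte packing (PORT_DIR left clear
 * selects receive; the PPI_CONTROL register address is assumed).
 */
bfin_write16(PPI_CONTROL, PORT_EN | PACK_EN | DLEN_8);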
-
-/* PPI_STATUS Masks */
-#define FLD 0x0400 /* Field Indicator */
-#define FT_ERR 0x0800 /* Frame Track Error */
-#define OVR 0x1000 /* FIFO Overflow Error */
-#define UNDR 0x2000 /* FIFO Underrun Error */
-#define ERR_DET 0x4000 /* Error Detected Indicator */
-#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
-
-
-/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-
-/* TWI_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
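
/*
 * Worked example of the "Use:" note above: with PRESCALE deriving the
 * fixed 10MHz internal reference, 50 low + 50 high reference periods per
 * SCL cycle give roughly 100kHz (values are illustrative; TWI_CLKDIV is
 * named in the note itself).
 */
bfin_write16(TWI_CLKDIV, CLKLOW(50) | CLKHI(50));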
-
-/* TWI_SLAVE_CTL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWI_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWI_MASTER_CTL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
-
-/* TWI_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWI_FIFO_CTRL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWI_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
-
-/* Omit CAN masks from defBF534.h */
-
-/* ******************* PIN CONTROL REGISTER MASKS ************************/
-/* PORT_MUX Masks */
-#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
-#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
-#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
-
-#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
-#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
-#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
-#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
-
-#define PFDE 0x0008 /* Port F DMA Request Enable */
-#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
-#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
-
-#define PFTE 0x0010 /* Port F Timer Enable */
-#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
-#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
-
-#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
-#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
-#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
-
-#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
-#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
-#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
-
-#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
-#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
-#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
-
-#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
-#define PFFE_TIMER 0x0000 /* Enable TMR2 */
-#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
-
-#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
-#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
-#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
-
-#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
-#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
-#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
-
-#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
-#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
-#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
-
-
-/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
-/* HDMAx_CTL Masks */
-#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
-#define REP 0x0002 /* HDMA Request Polarity */
-#define UTE 0x0004 /* Urgency Threshold Enable */
-#define OIE 0x0010 /* Overflow Interrupt Enable */
-#define BDIE 0x0020 /* Block Done Interrupt Enable */
-#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
-#define DRQ 0x0300 /* HDMA Request Type */
-#define DRQ_NONE 0x0000 /* No Request */
-#define DRQ_SINGLE 0x0100 /* Channels Request Single */
-#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
-#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
-#define RBC 0x1000 /* Reload BCNT With IBCNT */
-#define PS 0x2000 /* HDMA Pin Status */
-#define OI 0x4000 /* Overflow Interrupt Generated */
-#define BDI 0x8000 /* Block Done Interrupt Generated */
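
/*
 * Arming sketch for handshake MDMA block 0 (count values are arbitrary
 * examples; the HMDMA0_* register addresses appear earlier in this file).
 */
bfin_write16(HMDMA0_ECINIT, 0);
bfin_write16(HMDMA0_BCINIT, 64);
bfin_write16(HMDMA0_CONTROL, HMDMAEN | DRQ_MULTI | BDIE);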
-
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
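
/*
 * These are fixed entry points rather than linker symbols, so the usual
 * idiom is a call through a cast function pointer; e.g. a software reset:
 */
((void (*)(void))_BOOTROM_RESET)();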
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define PGDE_UART PFDE_UART
-#define PGDE_DMA PFDE_DMA
-#define CKELOW SCKELOW
-
-/* ==== end from defBF534.h ==== */
-
-/* HOST Port Registers */
-
-#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
-#define HOST_STATUS 0xffc03404 /* HOST Status Register */
-#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define CNT_CONFIG 0xffc03500 /* Configuration Register */
-#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
-#define CNT_STATUS 0xffc03508 /* Status Register */
-#define CNT_COMMAND 0xffc0350c /* Command Register */
-#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
-#define CNT_COUNTER 0xffc03514 /* Counter Register */
-#define CNT_MAX 0xffc03518 /* Maximal Count Register */
-#define CNT_MIN 0xffc0351c /* Minimal Count Register */
-
-/* OTP/FUSE Registers */
-
-#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
-#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
-#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
-#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
-#define SECURE_CONTROL 0xffc03624 /* Secure Control */
-#define SECURE_STATUS 0xffc03628 /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
-/* NFC Registers */
-
-#define NFC_CTL 0xffc03700 /* NAND Control Register */
-#define NFC_STAT 0xffc03704 /* NAND Status Register */
-#define NFC_IRQSTAT 0xffc03708 /* NAND Interrupt Status Register */
-#define NFC_IRQMASK 0xffc0370c /* NAND Interrupt Mask Register */
-#define NFC_ECC0 0xffc03710 /* NAND ECC Register 0 */
-#define NFC_ECC1 0xffc03714 /* NAND ECC Register 1 */
-#define NFC_ECC2 0xffc03718 /* NAND ECC Register 2 */
-#define NFC_ECC3 0xffc0371c /* NAND ECC Register 3 */
-#define NFC_COUNT 0xffc03720 /* NAND ECC Count Register */
-#define NFC_RST 0xffc03724 /* NAND ECC Reset Register */
-#define NFC_PGCTL 0xffc03728 /* NAND Page Control Register */
-#define NFC_READ 0xffc0372c /* NAND Read Data Register */
-#define NFC_ADDR 0xffc03740 /* NAND Address Register */
-#define NFC_CMD 0xffc03744 /* NAND Command Register */
-#define NFC_DATA_WR 0xffc03748 /* NAND Data Write Register */
-#define NFC_DATA_RD 0xffc0374c /* NAND Data Read Register */
-
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
-/* Bit masks for HOST_CONTROL */
-
-#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
-#define HOST_CNTR_nHOST_EN 0x0
-#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
-#define HOST_CNTR_nHOST_END 0x0
-#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
-#define HOST_CNTR_nDATA_SIZE 0x0
-#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
-#define HOST_CNTR_nHOST_RST 0x0
-#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
-#define HOST_CNTR_nHRDY_OVR 0x0
-#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
-#define HOST_CNTR_nINT_MODE 0x0
-#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
-#define HOST_CNTR_nBT_EN 0x0
-#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
-#define HOST_CNTR_nEHW 0x0
-#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
-#define HOST_CNTR_nEHR 0x0
-#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
-#define HOST_CNTR_nBDR 0x0
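
/*
 * Bring-up sketch: enable the host port block for both host reads and
 * writes (HOST_CONTROL is defined in the register list above).
 */
bfin_write16(HOST_CONTROL, HOST_CNTR_HOST_EN | HOST_CNTR_EHW | HOST_CNTR_EHR);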
-
-/* Bit masks for HOST_STATUS */
-
-#define HOST_STAT_READY 0x1 /* DMA Ready */
-#define HOST_STAT_nREADY 0x0
-#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
-#define HOST_STAT_nFIFOFULL 0x0
-#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
-#define HOST_STAT_nFIFOEMPTY 0x0
-#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
-#define HOST_STAT_nCOMPLETE 0x0
-#define HOST_STAT_HSHK 0x10 /* Host Handshake */
-#define HOST_STAT_nHSHK 0x0
-#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
-#define HOST_STAT_nTIMEOUT 0x0
-#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
-#define HOST_STAT_nHIRQ 0x0
-#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
-#define HOST_STAT_nALLOW_CNFG 0x0
-#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
-#define HOST_STAT_nDMA_DIR 0x0
-#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
-#define HOST_STAT_nBTE 0x0
-#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
-#define HOST_STAT_nHOSTRD_DONE 0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define EMUDABL 0x1 /* Emulation Disable. */
-#define nEMUDABL 0x0
-#define RSTDABL 0x2 /* Reset Disable */
-#define nRSTDABL 0x0
-#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
-#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
-#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
-#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
-#define nDMA0OVR 0x0
-#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
-#define nDMA1OVR 0x0
-#define EMUOVR 0x4000 /* Emulation Override */
-#define nEMUOVR 0x0
-#define OTPSEN 0x8000 /* OTP Secrets Enable. */
-#define nOTPSEN 0x0
-#define L2DABL 0x70000 /* L2 Memory Disable. */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define SECURE0 0x1 /* SECURE 0 */
-#define nSECURE0 0x0
-#define SECURE1 0x2 /* SECURE 1 */
-#define nSECURE1 0x0
-#define SECURE2 0x4 /* SECURE 2 */
-#define nSECURE2 0x0
-#define SECURE3 0x8 /* SECURE 3 */
-#define nSECURE3 0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define SECMODE 0x3 /* Secured Mode Control State */
-#define NMI 0x4 /* Non Maskable Interrupt */
-#define nNMI 0x0
-#define AFVALID 0x8 /* Authentication Firmware Valid */
-#define nAFVALID 0x0
-#define AFEXIT 0x10 /* Authentication Firmware Exit */
-#define nAFEXIT 0x0
-#define SECSTAT 0xe0 /* Secure Status */
-
-#endif /* _DEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/gpio.h b/arch/blackfin/mach-bf527/include/mach/gpio.h
index f80c2995efd..fba606b699c 100644
--- a/arch/blackfin/mach-bf527/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf527/include/mach/gpio.h
@@ -62,4 +62,8 @@
#define PORT_G GPIO_PG0
#define PORT_H GPIO_PH0
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf527/include/mach/pll.h b/arch/blackfin/mach-bf527/include/mach/pll.h
index 24f1d7c0232..94cca674d83 100644
--- a/arch/blackfin/mach-bf527/include/mach/pll.h
+++ b/arch/blackfin/mach-bf527/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 2ce7b16faee..d4bfcea5682 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -286,7 +286,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 20c102285be..87b5af3693c 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -25,7 +25,6 @@
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
-#include <mach/fio_flag.h>
/*
* Name the Board for the /proc/cpuinfo
@@ -225,7 +224,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -290,9 +289,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -324,9 +323,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
@@ -476,10 +475,16 @@ static int __init blackstamp_init(void)
return ret;
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
- /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
- bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
- bfin_write_FIO_FLAG_S(PF0);
- SSYNC();
+ /*
+ * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+ * the bfin-async-map driver takes care of flipping between
+ * flash and ethernet when necessary.
+ */
+ ret = gpio_request(GPIO_PF0, "enet_cpld");
+ if (!ret) {
+ gpio_direction_output(GPIO_PF0, 1);
+ gpio_free(GPIO_PF0);
+ }
#endif
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index adbe62a81e2..4d5604eaa7c 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -271,7 +271,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -336,9 +336,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -370,9 +370,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index a1cb8e7c101..b67b91d8224 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -349,7 +349,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 5ba4b02a12e..f869a371148 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -22,7 +22,6 @@
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
-#include <mach/fio_flag.h>
/*
* Name the Board for the /proc/cpuinfo
@@ -174,7 +173,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -295,15 +294,7 @@ static int __init ip0x_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
- for (i = 0; i < ARRAY_SIZE(bfin_spi_board_info); ++i) {
- int j = 1 << bfin_spi_board_info[i].chip_select;
- /* set spi cs to 1 */
- bfin_write_FIO_DIR(bfin_read_FIO_DIR() | j);
- bfin_write_FIO_FLAG_S(j);
- }
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index b3b1cdea270..43224ef00b8 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -24,7 +24,6 @@
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
-#include <mach/fio_flag.h>
/*
* Name the Board for the /proc/cpuinfo
@@ -354,7 +353,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -419,9 +418,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +452,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
@@ -674,10 +673,16 @@ static int __init stamp_init(void)
return ret;
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
- /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
- bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
- bfin_write_FIO_FLAG_S(PF0);
- SSYNC();
+ /*
+ * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+ * the bfin-async-map driver takes care of flipping between
+ * flash and ethernet when necessary.
+ */
+ ret = gpio_request(GPIO_PF0, "enet_cpld");
+ if (!ret) {
+ gpio_direction_output(GPIO_PF0, 1);
+ gpio_free(GPIO_PF0);
+ }
#endif
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
@@ -713,7 +718,6 @@ void __init native_machine_early_platform_add_devices(void)
void native_machine_restart(char *cmd)
{
/* workaround pull up on cpld / flash pin not being strong enough */
- bfin_write_FIO_INEN(~PF0);
- bfin_write_FIO_DIR(PF0);
- bfin_write_FIO_FLAG_C(PF0);
+ gpio_request(GPIO_PF0, "flash_cpld");
+ gpio_direction_output(GPIO_PF0, 0);
}
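The two hunks above replace raw FIO_DIR/FIO_FLAG register pokes with gpiolib calls, which also record ownership of the pin. A minimal sketch of the request/drive/release sequence, assuming the same pin and label as above (the helper name is illustrative):

    #include <linux/gpio.h>

    static void route_ams3_to_ethernet(void)
    {
    	/* claim the pin so nothing else can toggle it mid-sequence */
    	if (gpio_request(GPIO_PF0, "enet_cpld"))
    		return;
    	/* switch the pin to output and drive it high in one call */
    	gpio_direction_output(GPIO_PF0, 1);
    	/* the CPLD latches the level, so the pin can be released */
    	gpio_free(GPIO_PF0);
    }

Note that the restart path deliberately skips gpio_free(): the pin must stay driven low while the part reboots.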
diff --git a/arch/blackfin/mach-bf533/dma.c b/arch/blackfin/mach-bf533/dma.c
index 4a14a46a9a6..1f5988d4313 100644
--- a/arch/blackfin/mach-bf533/dma.c
+++ b/arch/blackfin/mach-bf533/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
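The const sits to the right of the *, so it is the array slots that become read-only, not the registers they point to; the table can then live in .rodata while the MMRs themselves stay writable. A standalone illustration of the distinction (hypothetical names):

    struct dma_register { unsigned long next_desc_ptr; };

    static struct dma_register chan0, chan1;

    /* array of const pointers: slots cannot be retargeted after init */
    static struct dma_register * const base_addrs[] = { &chan0, &chan1 };

    /* contrast: pointer to const data, pointee not writable through it */
    static const struct dma_register *ro_view = &chan0;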
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..08072c86d5d
--- /dev/null
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 1
+
+#endif
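Each mach now contributes only its UART port count here; the register offsets and helpers that used to sit next to it come from the shared <asm/bfin_serial.h>. A consumer would typically size its tables from the constant, along these lines (assumed usage, not quoted from the driver):

    #include <mach/bfin_serial.h>

    static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS];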
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
index 9e1f3defb6b..45dcaa4f3e4 100644
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#ifdef CONFIG_BFIN_UART0_CTSRTS
# define CONFIG_SERIAL_BFIN_CTSRTS
# ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
- unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- struct timer_list cts_timer;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -120,3 +48,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
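The macros and the bfin_serial_port definition removed above are consolidated, not dropped: the <asm/bfin_serial.h> include added at the bottom pulls in the shared copies. The LSR caching trick they carry is worth restating, since the hardware clears the error bits on read; a sticky software copy keeps them visible to other code paths. A sketch mirroring the removed block:

    static inline unsigned int uart_get_lsr(struct bfin_serial_port *uart)
    {
    	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);

    	/* latch the error bits so a later reader still sees them */
    	uart->lsr |= lsr & (BI | FE | PE | OE);
    	return lsr | uart->lsr;
    }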
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index f4bd6df5d96..e366207fbf1 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -1,7 +1,7 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _MACH_BLACKFIN_H_
@@ -10,26 +10,14 @@
#define BF533_FAMILY
#include "bf533.h"
-#include "defBF532.h"
#include "anomaly.h"
-#if !defined(__ASSEMBLY__)
-#include "cdefBF532.h"
-#endif
-
-#define BFIN_UART_NR_PORTS 1
+#include <asm/def_LPBlackfin.h>
+#include "defBF532.h"
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF532.h"
+#endif
-#endif /* _MACH_BLACKFIN_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
index 401e524f532..fd0cbe4df21 100644
--- a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
@@ -7,9 +7,6 @@
#ifndef _CDEF_BF532_H
#define _CDEF_BF532_H
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
/* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */
#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
@@ -66,16 +63,10 @@
#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN,val)
/* DMA Traffic controls */
-#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT,val)
+#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT,val)
/* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */
#define bfin_read_FIO_DIR() bfin_read16(FIO_DIR)
@@ -105,6 +96,47 @@
#define bfin_read_FIO_MASKB_T() bfin_read16(FIO_MASKB_T)
#define bfin_write_FIO_MASKB_T(val) bfin_write16(FIO_MASKB_T,val)
+#if ANOMALY_05000311
+/* Keep these as CPP macros to avoid circular header dependency loops */
+#define BFIN_WRITE_FIO_FLAG(name, val) \
+ do { \
+ unsigned long __flags; \
+ __flags = hard_local_irq_save(); \
+ bfin_write16(FIO_FLAG_##name, val); \
+ bfin_read_CHIPID(); \
+ hard_local_irq_restore(__flags); \
+ } while (0)
+#define bfin_write_FIO_FLAG_D(val) BFIN_WRITE_FIO_FLAG(D, val)
+#define bfin_write_FIO_FLAG_C(val) BFIN_WRITE_FIO_FLAG(C, val)
+#define bfin_write_FIO_FLAG_S(val) BFIN_WRITE_FIO_FLAG(S, val)
+#define bfin_write_FIO_FLAG_T(val) BFIN_WRITE_FIO_FLAG(T, val)
+
+#define BFIN_READ_FIO_FLAG(name) \
+ ({ \
+ unsigned long __flags; \
+ u16 __ret; \
+ __flags = hard_local_irq_save(); \
+ __ret = bfin_read16(FIO_FLAG_##name); \
+ bfin_read_CHIPID(); \
+ hard_local_irq_restore(__flags); \
+ __ret; \
+ })
+#define bfin_read_FIO_FLAG_D() BFIN_READ_FIO_FLAG(D)
+#define bfin_read_FIO_FLAG_C() BFIN_READ_FIO_FLAG(C)
+#define bfin_read_FIO_FLAG_S() BFIN_READ_FIO_FLAG(S)
+#define bfin_read_FIO_FLAG_T() BFIN_READ_FIO_FLAG(T)
+
+#else
+#define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val)
+#define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val)
+#define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val)
+#define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val)
+#define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D)
+#define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C)
+#define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S)
+#define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T)
+#endif
+
/* DMA Controller */
#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG,val)
@@ -647,7 +679,4 @@
#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME,val)
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
#endif /* _CDEF_BF532_H */
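Anomaly 05000311 requires each FIO access to be followed by a CHIPID read with interrupts hard-disabled. Keeping the workaround at the preprocessor level, instead of the inline functions from the now-deleted fio_flag.h, avoids dragging the irqflags machinery into this cdef header and creating an include cycle. Expanded by hand, bfin_write_FIO_FLAG_S(PF0) behaves like this sketch (the wrapper function name is hypothetical):

    static void set_pf0(void)
    {
    	unsigned long flags = hard_local_irq_save();

    	bfin_write16(FIO_FLAG_S, PF0);	/* the actual MMR write */
    	bfin_read_CHIPID();		/* dummy read pushes it past the anomaly window */
    	hard_local_irq_restore(flags);
    }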
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 3adb0b44e59..2376d539351 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -1,7 +1,7 @@
/*
* System & MMR bit and Address definitions for ADSP-BF532
*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -9,9 +9,6 @@
#ifndef _DEF_BF532_H
#define _DEF_BF532_H
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
/*********************************************************************************** */
/* System MMR Register Map */
/*********************************************************************************** */
@@ -182,12 +179,8 @@
#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
/* DMA Traffic controls */
-#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
+#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
@@ -432,83 +425,6 @@
#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB 0x80
-#define SB 0x40
-#define STP 0x20
-#define EPS 0x10
-#define PEN 0x08
-#define STB 0x04
-#define WLS(x) ((x-5) & 0x03)
-
-#define DLAB_P 0x07
-#define SB_P 0x06
-#define STP_P 0x05
-#define EPS_P 0x04
-#define PEN_P 0x03
-#define STB_P 0x02
-#define WLS_P1 0x01
-#define WLS_P0 0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA 0x10
-#define LOOP_ENA_P 0x04
-
-/* UART_LSR Register */
-#define TEMT 0x40
-#define THRE 0x20
-#define BI 0x10
-#define FE 0x08
-#define PE 0x04
-#define OE 0x02
-#define DR 0x01
-
-#define TEMP_P 0x06
-#define THRE_P 0x05
-#define BI_P 0x04
-#define FE_P 0x03
-#define PE_P 0x02
-#define OE_P 0x01
-#define DR_P 0x00
-
-/* UART_IER Register */
-#define ELSI 0x04
-#define ETBEI 0x02
-#define ERBFI 0x01
-
-#define ELSI_P 0x02
-#define ETBEI_P 0x01
-#define ERBFI_P 0x00
-
-/* UART_IIR Register */
-#define STATUS(x) ((x << 1) & 0x06)
-#define NINT 0x01
-#define STATUS_P1 0x02
-#define STATUS_P0 0x01
-#define NINT_P 0x00
-#define IIR_TX_READY 0x02 /* UART_THR empty */
-#define IIR_RX_READY 0x04 /* Receive data ready */
-#define IIR_LINE_CHANGE 0x06 /* Receive line status */
-#define IIR_STATUS 0x06
-
-/* UART_GCTL Register */
-#define FFE 0x20
-#define FPE 0x10
-#define RPOLC 0x08
-#define TPOLC 0x04
-#define IREN 0x02
-#define UCEN 0x01
-
-#define FFE_P 0x05
-#define FPE_P 0x04
-#define RPOLC_P 0x03
-#define TPOLC_P 0x02
-#define IREN_P 0x01
-#define UCEN_P 0x00
-
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
@@ -643,44 +559,6 @@
#define ERR_TYP_P0 0x0E
#define ERR_TYP_P1 0x0F
-/*/ ****************** PROGRAMMABLE FLAG MASKS ********************* */
-
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */
-#define PF0_P 0
-#define PF1_P 1
-#define PF2_P 2
-#define PF3_P 3
-#define PF4_P 4
-#define PF5_P 5
-#define PF6_P 6
-#define PF7_P 7
-#define PF8_P 8
-#define PF9_P 9
-#define PF10_P 10
-#define PF11_P 11
-#define PF12_P 12
-#define PF13_P 13
-#define PF14_P 14
-#define PF15_P 15
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
/* AMGCTL Masks */
diff --git a/arch/blackfin/mach-bf533/include/mach/fio_flag.h b/arch/blackfin/mach-bf533/include/mach/fio_flag.h
deleted file mode 100644
index d0bfba0b083..00000000000
--- a/arch/blackfin/mach-bf533/include/mach/fio_flag.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_FIO_FLAG_H
-#define _MACH_FIO_FLAG_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-#if ANOMALY_05000311
-#define BFIN_WRITE_FIO_FLAG(name) \
-static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
-{ \
- unsigned long flags; \
- flags = hard_local_irq_save(); \
- bfin_write16(FIO_FLAG_##name, val); \
- bfin_read_CHIPID(); \
- hard_local_irq_restore(flags); \
-}
-BFIN_WRITE_FIO_FLAG(D)
-BFIN_WRITE_FIO_FLAG(C)
-BFIN_WRITE_FIO_FLAG(S)
-BFIN_WRITE_FIO_FLAG(T)
-
-#define BFIN_READ_FIO_FLAG(name) \
-static inline u16 bfin_read_FIO_FLAG_##name(void) \
-{ \
- unsigned long flags; \
- u16 ret; \
- flags = hard_local_irq_save(); \
- ret = bfin_read16(FIO_FLAG_##name); \
- bfin_read_CHIPID(); \
- hard_local_irq_restore(flags); \
- return ret; \
-}
-BFIN_READ_FIO_FLAG(D)
-BFIN_READ_FIO_FLAG(C)
-BFIN_READ_FIO_FLAG(S)
-BFIN_READ_FIO_FLAG(T)
-
-#else
-#define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val)
-#define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val)
-#define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val)
-#define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val)
-#define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T)
-#define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C)
-#define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S)
-#define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D)
-#endif
-
-#endif /* _MACH_FIO_FLAG_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/gpio.h b/arch/blackfin/mach-bf533/include/mach/gpio.h
index e02416db4b0..cce4f8fb378 100644
--- a/arch/blackfin/mach-bf533/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf533/include/mach/gpio.h
@@ -28,4 +28,6 @@
#define PORT_F GPIO_PF0
+#include <mach-common/ports-f.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf533/include/mach/pll.h b/arch/blackfin/mach-bf533/include/mach/pll.h
index 169c106d0ed..94cca674d83 100644
--- a/arch/blackfin/mach-bf533/include/mach/pll.h
+++ b/arch/blackfin/mach-bf533/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr = bfin_read32(SIC_IWR);
- /* Only allow PPL Wakeup) */
- bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR, iwr);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr = bfin_read32(SIC_IWR);
- /* Only allow PPL Wakeup) */
- bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR, iwr);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
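The per-mach pll.h copies collapse into one shared header; the relock sequence itself is unchanged. A PLL_CTL (or VR_CTL) write must idle the core so the PLL can relock, with only the PLL wakeup source armed in SIC_IWR. A condensed sketch of the single-IWR case, matching the code removed above (the mach-common version also has to cope with parts that carry several IWR registers):

    static inline void bfin_write_PLL_CTL(unsigned int val)
    {
    	unsigned long flags, iwr;

    	if (val == bfin_read_PLL_CTL())
    		return;			/* no change, skip the costly relock */

    	flags = hard_local_irq_save();
    	iwr = bfin_read32(SIC_IWR);
    	bfin_write32(SIC_IWR, IWR_ENABLE(0));	/* wake only on the PLL event */

    	bfin_write16(PLL_CTL, val);
    	SSYNC();
    	asm("IDLE;");			/* sleep until the relock wakeup fires */

    	bfin_write32(SIC_IWR, iwr);
    	hard_local_irq_restore(flags);
    }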
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 44132fda63b..a44bf3a1816 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -39,4 +39,10 @@ config CAMSIG_MINOTAUR
help
Board supply package for CSP Minotaur
+config DNP5370
+ bool "SSV Dil/NetPC DNP/5370"
+ depends on (BF537)
+ help
+ Board support package for the DNP/5370 DIL64 module
+
endchoice
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index 7e6aa4e5b20..fe42258fe1f 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_BFIN537_BLUETECHNIX_CM_U) += cm_bf537u.o
obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o
obj-$(CONFIG_PNAV10) += pnav10.o
obj-$(CONFIG_CAMSIG_MINOTAUR) += minotaur.o
+obj-$(CONFIG_DNP5370) += dnp5370.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 836698c4ee5..2c776e188a9 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -373,7 +373,7 @@ static struct resource bfin_uart0_resources[] = {
#endif
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -434,7 +434,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -545,9 +545,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -579,9 +579,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 2a85670273c..085661175ec 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -356,7 +356,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -399,7 +399,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -510,9 +510,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -544,9 +544,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
new file mode 100644
index 00000000000..e1e9ea02ad8
--- /dev/null
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -0,0 +1,418 @@
+/*
+ * This is the configuration for the SSV Dil/NetPC DNP/5370 board.
+ *
+ * DIL module: http://www.dilnetpc.com/dnp0086.htm
+ * SK28 (starter kit): http://www.dilnetpc.com/dnp0088.htm
+ *
+ * Copyright 2010 3ality Digital Systems
+ * Copyright 2005 National ICT Australia (NICTA)
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/phy.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/reboot.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "DNP/5370";
+#define FLASH_MAC 0x202f0000
+#define CONFIG_MTD_PHYSMAP_LEN 0x300000
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+ .name = "rtc-bfin",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+ {
+ .addr = 1,
+ .irq = PHY_POLL, /* IRQ_MAC_PHYINT */
+ },
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+ .phydev_number = 1,
+ .phydev_data = bfin_phydev_data,
+ .phy_mode = PHY_INTERFACE_MODE_RMII,
+ .mac_peripherals = bfin_mac_peripherals,
+};
+
+static struct platform_device bfin_mii_bus = {
+ .name = "bfin_mii_bus",
+ .dev = {
+ .platform_data = &bfin_mii_bus_data,
+ }
+};
+
+static struct platform_device bfin_mac_device = {
+ .name = "bfin_mac",
+ .dev = {
+ .platform_data = &bfin_mii_bus,
+ }
+};
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition asmb_flash_partitions[] = {
+ {
+ .name = "bootloader(nor)",
+ .size = 0x30000,
+ .offset = 0,
+ }, {
+ .name = "linux kernel and rootfs(nor)",
+ .size = 0x300000 - 0x30000 - 0x10000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "MAC address(nor)",
+ .size = 0x10000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE,
+ }
+};
+
+static struct physmap_flash_data asmb_flash_data = {
+ .width = 1,
+ .parts = asmb_flash_partitions,
+ .nr_parts = ARRAY_SIZE(asmb_flash_partitions),
+};
+
+static struct resource asmb_flash_resource = {
+ .start = 0x20000000,
+ .end = 0x202fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+/* 4 MB of NOR flash is attached to async memory banks 0-2,
+ * so only 3 MB are visible.
+ */
+static struct platform_device asmb_flash_device = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &asmb_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &asmb_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+
+#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
+
+static int bfin_mmc_spi_init(struct device *dev,
+ irqreturn_t (*detect_int)(int, void *), void *data)
+{
+ return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
+ IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
+}
+
+static void bfin_mmc_spi_exit(struct device *dev, void *data)
+{
+ free_irq(MMC_SPI_CARD_DETECT_INT, data);
+}
+
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+ .enable_dma = 0, /* do not use DMA transfers with this chip */
+ .bits_per_word = 8,
+};
+
+static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
+ .init = bfin_mmc_spi_init,
+ .exit = bfin_mmc_spi_exit,
+ .detect_delay = 100, /* msecs */
+};
+#endif
+
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+/* This mapping is for the at45db642, which has a 1056-byte page size;
+ * partition size and offset should be page-aligned.
+ */
+static struct mtd_partition bfin_spi_dataflash_partitions[] = {
+ {
+ .name = "JFFS2 dataflash(nor)",
+#ifdef CONFIG_MTD_PAGESIZE_1024
+ .offset = 0x40000,
+ .size = 0x7C0000,
+#else
+ .offset = 0x0,
+ .size = 0x840000,
+#endif
+ }
+};
+
+static struct flash_platform_data bfin_spi_dataflash_data = {
+ .name = "mtd_dataflash",
+ .parts = bfin_spi_dataflash_partitions,
+ .nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
+ .type = "mtd_dataflash",
+};
+
+static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
+ .enable_dma = 0, /* do not use DMA transfers with this chip */
+ .bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+/* SD/MMC card reader on the SPI bus */
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+ {
+ .modalias = "mmc_spi",
+ .max_speed_hz = 20000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .platform_data = &bfin_mmc_spi_pdata,
+ .controller_data = &mmc_spi_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+
+/* 8 MB Atmel NOR flash chip on the SPI bus */
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+ {
+ .modalias = "mtd_dataflash",
+ .max_speed_hz = 16700000,
+ .bus_num = 0,
+ .chip_select = 2,
+ .platform_data = &bfin_spi_dataflash_data,
+ .controller_data = &spi_dataflash_chip_info,
+ .mode = SPI_MODE_3, /* SPI_CPHA and SPI_CPOL */
+ },
+#endif
+};
+
+/* SPI controller data */
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+ [0] = {
+ .start = SPI0_REGBASE,
+ .end = SPI0_REGBASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = CH_SPI,
+ .end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct bfin5xx_spi_master spi_bfin_master_info = {
+ .num_chipselect = 8,
+ .enable_dma = 1, /* the master can perform DMA transfers */
+ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device spi_bfin_master_device = {
+ .name = "bfin-spi",
+ .id = 0, /* Bus number */
+ .num_resources = ARRAY_SIZE(bfin_spi0_resource),
+ .resource = bfin_spi0_resource,
+ .dev = {
+ .platform_data = &spi_bfin_master_info, /* Passed to driver */
+ },
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+static struct resource bfin_uart0_resources[] = {
+ {
+ .start = UART0_THR,
+ .end = UART0_GCTL+2,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_UART0_RX,
+ .end = IRQ_UART0_RX+1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_UART0_ERROR,
+ .end = IRQ_UART0_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = CH_UART0_TX,
+ .end = CH_UART0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = CH_UART0_RX,
+ .end = CH_UART0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static unsigned short bfin_uart0_peripherals[] = {
+ P_UART0_TX, P_UART0_RX, 0
+};
+
+static struct platform_device bfin_uart0_device = {
+ .name = "bfin-uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_uart0_resources),
+ .resource = bfin_uart0_resources,
+ .dev = {
+ .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
+ },
+};
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_UART1
+static struct resource bfin_uart1_resources[] = {
+ {
+ .start = UART1_THR,
+ .end = UART1_GCTL+2,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_UART1_RX,
+ .end = IRQ_UART1_RX+1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_UART1_ERROR,
+ .end = IRQ_UART1_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = CH_UART1_TX,
+ .end = CH_UART1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = CH_UART1_RX,
+ .end = CH_UART1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static unsigned short bfin_uart1_peripherals[] = {
+ P_UART1_TX, P_UART1_RX, 0
+};
+
+static struct platform_device bfin_uart1_device = {
+ .name = "bfin-uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(bfin_uart1_resources),
+ .resource = bfin_uart1_resources,
+ .dev = {
+ .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
+ },
+};
+#endif
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+ [0] = {
+ .start = TWI0_REGBASE,
+ .end = TWI0_REGBASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TWI,
+ .end = IRQ_TWI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device i2c_bfin_twi_device = {
+ .name = "i2c-bfin-twi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_twi0_resource),
+ .resource = bfin_twi0_resource,
+};
+#endif
+
+static struct platform_device *dnp5370_devices[] __initdata = {
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+ &bfin_uart0_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+ &bfin_uart1_device,
+#endif
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+ &asmb_flash_device,
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+ &bfin_mii_bus,
+ &bfin_mac_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+ &spi_bfin_master_device,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+ &i2c_bfin_twi_device,
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+ &rtc_device,
+#endif
+
+};
+
+static int __init dnp5370_init(void)
+{
+ printk(KERN_INFO "DNP/5370: registering device resources\n");
+ platform_add_devices(dnp5370_devices, ARRAY_SIZE(dnp5370_devices));
+ printk(KERN_INFO "DNP/5370: registering %zu SPI slave devices\n",
+ ARRAY_SIZE(bfin_spi_board_info));
+ spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+ printk(KERN_INFO "DNP/5370: MAC %pM\n", (void *)FLASH_MAC);
+ return 0;
+}
+arch_initcall(dnp5370_init);
+
+/*
+ * Currently the MAC address is stored in flash by U-Boot
+ */
+void bfin_get_ether_addr(char *addr)
+{
+ *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
+ *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
+}
+EXPORT_SYMBOL(bfin_get_ether_addr);
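bfin_get_ether_addr() pulls the six MAC bytes out of the NOR window as one 32-bit plus one 16-bit access, matching how U-Boot stores them at FLASH_MAC. Since the flash is memory mapped, a byte-wise read works just as well; an equivalent sketch:

    void bfin_get_ether_addr(char *addr)
    {
    	int i;

    	/* FLASH_MAC points into the async flash mapping */
    	for (i = 0; i < 6; i++)
    		addr[i] = bfin_read8(FLASH_MAC + i);
    }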
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 49800518412..bfb3671a78d 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -263,7 +263,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -306,7 +306,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -419,9 +419,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +453,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index b95807894e2..9389f03e3b0 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -367,7 +367,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -410,7 +410,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3aa344ce8e5..2c69785a7bb 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -289,7 +289,7 @@ static struct platform_device isp1362_hcd_device = {
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
@@ -693,7 +693,7 @@ static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
#endif
#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
-unsigned short ad2s120x_platform_data[] = {
+static unsigned short ad2s120x_platform_data[] = {
/* used as SAMPLE and RDVEL */
GPIO_PF5, GPIO_PF6, 0
};
@@ -705,7 +705,7 @@ static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
#endif
#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
-unsigned short ad2s1210_platform_data[] = {
+static unsigned short ad2s1210_platform_data[] = {
/* use as SAMPLE, A0, A1 */
GPIO_PF7, GPIO_PF8, GPIO_PF9,
# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
@@ -1717,7 +1717,7 @@ static struct resource bfin_uart0_resources[] = {
#endif
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -1760,7 +1760,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -2447,9 +2447,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -2481,9 +2481,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 31498add1a4..0761b201abc 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -356,7 +356,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -399,7 +399,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -512,9 +512,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -546,9 +546,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/dma.c b/arch/blackfin/mach-bf537/dma.c
index 5c8c4ed517b..5c62e99c9fa 100644
--- a/arch/blackfin/mach-bf537/dma.c
+++ b/arch/blackfin/mach-bf537/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..00c603fe821
--- /dev/null
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 2
+
+#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
index 635c91c526a..3e955dba895 100644
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
# define CONFIG_SERIAL_BFIN_CTSRTS
@@ -54,49 +27,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -145,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index a12d4b6a221..baa096fc724 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -1,7 +1,7 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
*/
#ifndef _MACH_BLACKFIN_H_
@@ -10,34 +10,24 @@
#define BF537_FAMILY
#include "bf537.h"
-#include "defBF534.h"
#include "anomaly.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF534
+# include "defBF534.h"
+#endif
#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "defBF537.h"
+# include "defBF537.h"
#endif
#if !defined(__ASSEMBLY__)
-#include "cdefBF534.h"
-
-#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "cdefBF537.h"
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF534
+# include "cdefBF534.h"
+# endif
+# if defined(CONFIG_BF537) || defined(CONFIG_BF536)
+# include "cdefBF537.h"
+# endif
#endif
-#endif
-
-#define BFIN_UART_NR_PORTS 2
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
index fbeb35e1413..563ede90733 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF534_H
#define _CDEF_BF534_H
-#include <asm/blackfin.h>
-
-/* Include all Core registers and bit definitions */
-#include "defBF534.h"
-
-/* Include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
@@ -355,16 +347,10 @@
#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val)
/* DMA Traffic Control Registers */
-#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val)
+#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT,val)
/* DMA Controller */
#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
@@ -1747,7 +1733,4 @@
#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT,val)
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
#endif /* _CDEF_BF534_H */
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
index 9363c399042..19ec21ea150 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
@@ -10,9 +10,6 @@
/* Include MMRs Common to BF534 */
#include "cdefBF534.h"
-/* Include all Core registers and bit definitions */
-#include "defBF537.h"
-
/* Include Macro "Defines" For EMAC (Unique to BF536/BF537 */
/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
#define bfin_read_EMAC_OPMODE() bfin_read32(EMAC_OPMODE)
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 0323e6bacda..32529a03b26 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,9 +7,6 @@
#ifndef _DEF_BF534_H
#define _DEF_BF534_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
/************************************************************************************
** System MMR Register Map
*************************************************************************************/
@@ -193,12 +190,8 @@
#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
/* DMA Traffic Control Registers */
-#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
+#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
@@ -1029,48 +1022,6 @@
#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks */
-#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
-#define STB 0x04 /* Stop Bits */
-#define PEN 0x08 /* Parity Enable */
-#define EPS 0x10 /* Even Parity Select */
-#define STP 0x20 /* Stick Parity */
-#define SB 0x40 /* Set Break */
-#define DLAB 0x80 /* Divisor Latch Access */
-
-/* UARTx_MCR Mask */
-#define LOOP_ENA 0x10 /* Loopback Mode Enable */
-#define LOOP_ENA_P 0x04
-/* UARTx_LSR Masks */
-#define DR 0x01 /* Data Ready */
-#define OE 0x02 /* Overrun Error */
-#define PE 0x04 /* Parity Error */
-#define FE 0x08 /* Framing Error */
-#define BI 0x10 /* Break Interrupt */
-#define THRE 0x20 /* THR Empty */
-#define TEMT 0x40 /* TSR and UART_THR Empty */
-
-/* UARTx_IER Masks */
-#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
-#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI 0x04 /* Enable RX Status Interrupt */
-
-/* UARTx_IIR Masks */
-#define NINT 0x01 /* Pending Interrupt */
-#define IIR_TX_READY 0x02 /* UART_THR empty */
-#define IIR_RX_READY 0x04 /* Receive data ready */
-#define IIR_LINE_CHANGE 0x06 /* Receive line status */
-#define IIR_STATUS 0x06
-
-/* UARTx_GCTL Masks */
-#define UCEN 0x01 /* Enable UARTx Clocks */
-#define IREN 0x02 /* Enable IrDA Mode */
-#define TPOLC 0x04 /* IrDA TX Polarity Change */
-#define RPOLC 0x08 /* IrDA RX Polarity Change */
-#define FPE 0x10 /* Force Parity Error On Transmit */
-#define FFE 0x20 /* Force Framing Error On Transmit */
-
/* **************** GENERAL PURPOSE TIMER MASKS **********************/
/* TIMER_ENABLE Masks */
#define TIMEN0 0x0001 /* Enable Timer 0 */
@@ -1141,62 +1092,6 @@
#define EMU_RUN 0x0200 /* Emulation Behavior Select */
#define ERR_TYP 0xC000 /* Error Type */
-/* ****************** GPIO PORTS F, G, H MASKS ***********************/
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
-/* Port F Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* Port G Masks */
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* Port H Masks */
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
-#define PH8 0x0100
-#define PH9 0x0200
-#define PH10 0x0400
-#define PH11 0x0800
-#define PH12 0x1000
-#define PH13 0x2000
-#define PH14 0x4000
-#define PH15 0x8000
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF537.h b/arch/blackfin/mach-bf537/include/mach/defBF537.h
index 8cb5d5cf0c9..3d471d75268 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF537.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,9 +7,6 @@
#ifndef _DEF_BF537_H
#define _DEF_BF537_H
-/* Include all Core registers and bit definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
/* Include all MMR and bit defines common to BF534 */
#include "defBF534.h"
diff --git a/arch/blackfin/mach-bf537/include/mach/gpio.h b/arch/blackfin/mach-bf537/include/mach/gpio.h
index f80c2995efd..fba606b699c 100644
--- a/arch/blackfin/mach-bf537/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf537/include/mach/gpio.h
@@ -62,4 +62,8 @@
#define PORT_G GPIO_PG0
#define PORT_H GPIO_PH0
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf537/include/mach/pll.h b/arch/blackfin/mach-bf537/include/mach/pll.h
index 169c106d0ed..94cca674d83 100644
--- a/arch/blackfin/mach-bf537/include/mach/pll.h
+++ b/arch/blackfin/mach-bf537/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr = bfin_read32(SIC_IWR);
- /* Only allow PPL Wakeup) */
- bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR, iwr);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr = bfin_read32(SIC_IWR);
- /* Only allow PPL Wakeup) */
- bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR, iwr);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index c6fb0a52f84..e61424ef35e 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -82,7 +82,7 @@ static struct resource bfin_uart0_resources[] = {
#endif
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -125,7 +125,7 @@ static struct resource bfin_uart1_resources[] = {
},
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
@@ -168,7 +168,7 @@ static struct resource bfin_uart2_resources[] = {
},
};
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
P_UART2_TX, P_UART2_RX, 0
};
@@ -282,9 +282,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -316,9 +316,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
@@ -350,7 +350,7 @@ static struct resource bfin_sport2_uart_resources[] = {
},
};
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};
@@ -384,7 +384,7 @@ static struct resource bfin_sport3_uart_resources[] = {
},
};
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};
@@ -402,7 +402,7 @@ static struct platform_device bfin_sport3_uart_device = {
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
diff --git a/arch/blackfin/mach-bf538/dma.c b/arch/blackfin/mach-bf538/dma.c
index 5dc02258921..cce8ef5a5ce 100644
--- a/arch/blackfin/mach-bf538/dma.c
+++ b/arch/blackfin/mach-bf538/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
@@ -32,14 +32,14 @@ struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA17_NEXT_DESC_PTR,
(struct dma_register *) DMA18_NEXT_DESC_PTR,
(struct dma_register *) DMA19_NEXT_DESC_PTR,
- (struct dma_register *) MDMA0_D0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA0_S0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA0_D1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA0_S1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
};
EXPORT_SYMBOL(dma_io_base_addr);
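
Two independent changes land in this hunk: the MDMA0_*/MDMA1_* channel names collapse into the flat MDMA_D0..MDMA_S3 scheme used by the other mach directories, and the table itself becomes "* const". The const placement matters: it freezes the array entries, not the registers they point at. A standalone sketch of the distinction (dma_base, reg_a and reg_b are hypothetical stand-ins, not names from the patch):

	/* Hypothetical illustration of the pointer-array const placement. */
	struct dma_register { unsigned short cfg; };

	static struct dma_register reg_a, reg_b;

	/* Entries are fixed at build time; the hardware they address is not. */
	static struct dma_register * const dma_base[] = { &reg_a, &reg_b };

	int main(void)
	{
		dma_base[0]->cfg = 1;	/* fine: writes through the const table */
		/* dma_base[0] = &reg_b;   error: the table entry itself is const */
		return 0;
	}
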
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..c66e2760aad
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 3
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
index 5c148142f04..beb502e9cb3 100644
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
# define CONFIG_SERIAL_BFIN_CTSRTS
@@ -54,50 +27,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- struct timer_list cts_timer;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -160,3 +89,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
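
With the accessor macros and struct bfin_serial_port gone, this header is reduced to the per-board resource table, and the shared machinery presumably moves to the <asm/bfin_serial.h> now included at the end. One piece worth recalling from the removed code is the sticky LSR cache: the hardware clears line-status error bits on read, so the driver latches them in software until explicitly cleared. The removed helpers, kept here as a reference sketch (BI/FE/PE/OE and OFFSET_LSR are the standard Blackfin UART definitions):

	/* Reference sketch of the removed helpers. */
	static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
	{
		unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
		uart->lsr |= (lsr & (BI | FE | PE | OE));	/* latch error bits */
		return lsr | uart->lsr;
	}

	static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
	{
		uart->lsr = 0;					/* drop the software cache */
		bfin_write16(uart->port.membase + OFFSET_LSR, -1);
	}
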
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 08b5eabb1ed..791d08400cf 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -10,31 +10,24 @@
#define BF538_FAMILY
#include "bf538.h"
-#include "defBF539.h"
#include "anomaly.h"
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF538.h"
-
-#if defined(CONFIG_BF539)
-#include "cdefBF539.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF538
+# include "defBF538.h"
#endif
+#ifdef CONFIG_BF539
+# include "defBF539.h"
#endif
-#define BFIN_UART_NR_PORTS 3
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF538
+# include "cdefBF538.h"
+# endif
+# ifdef CONFIG_BF539
+# include "cdefBF539.h"
+# endif
+#endif
#endif
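
The reordered includes follow the usual Blackfin header split: def* headers hold raw MMR addresses and are assembly-safe, while cdef* headers wrap them in C accessor macros, hence the __ASSEMBLY__ guard around the latter. A compressed sketch of the two layers, using definitions that appear later in this patch:

	/* defBF538.h layer: plain address, usable from .S files */
	#define PLL_CTL 0xFFC00000	/* PLL Control register (16-bit) */

	#ifndef __ASSEMBLY__
	/* cdefBF538.h layer: typed C accessors over the raw address */
	# define bfin_read_PLL_CTL()		bfin_read16(PLL_CTL)
	# define bfin_write_PLL_CTL(val)	bfin_write16(PLL_CTL, val)
	#endif
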
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
index 085b06b8c0a..f6a56792180 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF538_H
#define _CDEF_BF538_H
-#include <asm/blackfin.h>
-
-/*include all Core registers and bit definitions*/
-#include "defBF539.h"
-
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
#define bfin_writePTR(addr, val) bfin_write32(addr, val)
#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
@@ -487,10 +479,10 @@
#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
-#define bfin_read_DMA0_TC_PER() bfin_read16(DMA0_TC_PER)
-#define bfin_write_DMA0_TC_PER(val) bfin_write16(DMA0_TC_PER, val)
-#define bfin_read_DMA0_TC_CNT() bfin_read16(DMA0_TC_CNT)
-#define bfin_write_DMA0_TC_CNT(val) bfin_write16(DMA0_TC_CNT, val)
+#define bfin_read_DMAC0_TC_PER() bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val) bfin_write16(DMAC0_TC_PER, val)
+#define bfin_read_DMAC0_TC_CNT() bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val) bfin_write16(DMAC0_TC_CNT, val)
#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR)
#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val)
#define bfin_read_DMA0_START_ADDR() bfin_readPTR(DMA0_START_ADDR)
@@ -699,10 +691,10 @@
#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_TC_PER() bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val) bfin_write16(DMA1_TC_PER, val)
-#define bfin_read_DMA1_TC_CNT() bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val) bfin_write16(DMA1_TC_CNT, val)
+#define bfin_read_DMAC1_TC_PER() bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val) bfin_write16(DMAC1_TC_PER, val)
+#define bfin_read_DMAC1_TC_CNT() bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val) bfin_write16(DMAC1_TC_CNT, val)
#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR)
#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val)
#define bfin_read_DMA8_START_ADDR() bfin_readPTR(DMA8_START_ADDR)
@@ -1015,273 +1007,214 @@
#define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val)
#define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT)
#define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_START_ADDR() bfin_readPTR(MDMA0_D0_START_ADDR)
-#define bfin_write_MDMA0_D0_START_ADDR(val) bfin_writePTR(MDMA0_D0_START_ADDR, val)
-#define bfin_read_MDMA0_D0_CONFIG() bfin_read16(MDMA0_D0_CONFIG)
-#define bfin_write_MDMA0_D0_CONFIG(val) bfin_write16(MDMA0_D0_CONFIG, val)
-#define bfin_read_MDMA0_D0_X_COUNT() bfin_read16(MDMA0_D0_X_COUNT)
-#define bfin_write_MDMA0_D0_X_COUNT(val) bfin_write16(MDMA0_D0_X_COUNT, val)
-#define bfin_read_MDMA0_D0_X_MODIFY() bfin_read16(MDMA0_D0_X_MODIFY)
-#define bfin_write_MDMA0_D0_X_MODIFY(val) bfin_write16(MDMA0_D0_X_MODIFY, val)
-#define bfin_read_MDMA0_D0_Y_COUNT() bfin_read16(MDMA0_D0_Y_COUNT)
-#define bfin_write_MDMA0_D0_Y_COUNT(val) bfin_write16(MDMA0_D0_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_Y_MODIFY() bfin_read16(MDMA0_D0_Y_MODIFY)
-#define bfin_write_MDMA0_D0_Y_MODIFY(val) bfin_write16(MDMA0_D0_Y_MODIFY, val)
-#define bfin_read_MDMA0_D0_CURR_DESC_PTR() bfin_readPTR(MDMA0_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_CURR_ADDR() bfin_readPTR(MDMA0_D0_CURR_ADDR)
-#define bfin_write_MDMA0_D0_CURR_ADDR(val) bfin_writePTR(MDMA0_D0_CURR_ADDR, val)
-#define bfin_read_MDMA0_D0_IRQ_STATUS() bfin_read16(MDMA0_D0_IRQ_STATUS)
-#define bfin_write_MDMA0_D0_IRQ_STATUS(val) bfin_write16(MDMA0_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D0_PERIPHERAL_MAP() bfin_read16(MDMA0_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D0_CURR_X_COUNT() bfin_read16(MDMA0_D0_CURR_X_COUNT)
-#define bfin_write_MDMA0_D0_CURR_X_COUNT(val) bfin_write16(MDMA0_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D0_CURR_Y_COUNT() bfin_read16(MDMA0_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D0_CURR_Y_COUNT(val) bfin_write16(MDMA0_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_START_ADDR() bfin_readPTR(MDMA0_S0_START_ADDR)
-#define bfin_write_MDMA0_S0_START_ADDR(val) bfin_writePTR(MDMA0_S0_START_ADDR, val)
-#define bfin_read_MDMA0_S0_CONFIG() bfin_read16(MDMA0_S0_CONFIG)
-#define bfin_write_MDMA0_S0_CONFIG(val) bfin_write16(MDMA0_S0_CONFIG, val)
-#define bfin_read_MDMA0_S0_X_COUNT() bfin_read16(MDMA0_S0_X_COUNT)
-#define bfin_write_MDMA0_S0_X_COUNT(val) bfin_write16(MDMA0_S0_X_COUNT, val)
-#define bfin_read_MDMA0_S0_X_MODIFY() bfin_read16(MDMA0_S0_X_MODIFY)
-#define bfin_write_MDMA0_S0_X_MODIFY(val) bfin_write16(MDMA0_S0_X_MODIFY, val)
-#define bfin_read_MDMA0_S0_Y_COUNT() bfin_read16(MDMA0_S0_Y_COUNT)
-#define bfin_write_MDMA0_S0_Y_COUNT(val) bfin_write16(MDMA0_S0_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_Y_MODIFY() bfin_read16(MDMA0_S0_Y_MODIFY)
-#define bfin_write_MDMA0_S0_Y_MODIFY(val) bfin_write16(MDMA0_S0_Y_MODIFY, val)
-#define bfin_read_MDMA0_S0_CURR_DESC_PTR() bfin_readPTR(MDMA0_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_CURR_ADDR() bfin_readPTR(MDMA0_S0_CURR_ADDR)
-#define bfin_write_MDMA0_S0_CURR_ADDR(val) bfin_writePTR(MDMA0_S0_CURR_ADDR, val)
-#define bfin_read_MDMA0_S0_IRQ_STATUS() bfin_read16(MDMA0_S0_IRQ_STATUS)
-#define bfin_write_MDMA0_S0_IRQ_STATUS(val) bfin_write16(MDMA0_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S0_PERIPHERAL_MAP() bfin_read16(MDMA0_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S0_CURR_X_COUNT() bfin_read16(MDMA0_S0_CURR_X_COUNT)
-#define bfin_write_MDMA0_S0_CURR_X_COUNT(val) bfin_write16(MDMA0_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S0_CURR_Y_COUNT() bfin_read16(MDMA0_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S0_CURR_Y_COUNT(val) bfin_write16(MDMA0_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_START_ADDR() bfin_readPTR(MDMA0_D1_START_ADDR)
-#define bfin_write_MDMA0_D1_START_ADDR(val) bfin_writePTR(MDMA0_D1_START_ADDR, val)
-#define bfin_read_MDMA0_D1_CONFIG() bfin_read16(MDMA0_D1_CONFIG)
-#define bfin_write_MDMA0_D1_CONFIG(val) bfin_write16(MDMA0_D1_CONFIG, val)
-#define bfin_read_MDMA0_D1_X_COUNT() bfin_read16(MDMA0_D1_X_COUNT)
-#define bfin_write_MDMA0_D1_X_COUNT(val) bfin_write16(MDMA0_D1_X_COUNT, val)
-#define bfin_read_MDMA0_D1_X_MODIFY() bfin_read16(MDMA0_D1_X_MODIFY)
-#define bfin_write_MDMA0_D1_X_MODIFY(val) bfin_write16(MDMA0_D1_X_MODIFY, val)
-#define bfin_read_MDMA0_D1_Y_COUNT() bfin_read16(MDMA0_D1_Y_COUNT)
-#define bfin_write_MDMA0_D1_Y_COUNT(val) bfin_write16(MDMA0_D1_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_Y_MODIFY() bfin_read16(MDMA0_D1_Y_MODIFY)
-#define bfin_write_MDMA0_D1_Y_MODIFY(val) bfin_write16(MDMA0_D1_Y_MODIFY, val)
-#define bfin_read_MDMA0_D1_CURR_DESC_PTR() bfin_readPTR(MDMA0_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_CURR_ADDR() bfin_readPTR(MDMA0_D1_CURR_ADDR)
-#define bfin_write_MDMA0_D1_CURR_ADDR(val) bfin_writePTR(MDMA0_D1_CURR_ADDR, val)
-#define bfin_read_MDMA0_D1_IRQ_STATUS() bfin_read16(MDMA0_D1_IRQ_STATUS)
-#define bfin_write_MDMA0_D1_IRQ_STATUS(val) bfin_write16(MDMA0_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D1_PERIPHERAL_MAP() bfin_read16(MDMA0_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D1_CURR_X_COUNT() bfin_read16(MDMA0_D1_CURR_X_COUNT)
-#define bfin_write_MDMA0_D1_CURR_X_COUNT(val) bfin_write16(MDMA0_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D1_CURR_Y_COUNT() bfin_read16(MDMA0_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D1_CURR_Y_COUNT(val) bfin_write16(MDMA0_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_START_ADDR() bfin_readPTR(MDMA0_S1_START_ADDR)
-#define bfin_write_MDMA0_S1_START_ADDR(val) bfin_writePTR(MDMA0_S1_START_ADDR, val)
-#define bfin_read_MDMA0_S1_CONFIG() bfin_read16(MDMA0_S1_CONFIG)
-#define bfin_write_MDMA0_S1_CONFIG(val) bfin_write16(MDMA0_S1_CONFIG, val)
-#define bfin_read_MDMA0_S1_X_COUNT() bfin_read16(MDMA0_S1_X_COUNT)
-#define bfin_write_MDMA0_S1_X_COUNT(val) bfin_write16(MDMA0_S1_X_COUNT, val)
-#define bfin_read_MDMA0_S1_X_MODIFY() bfin_read16(MDMA0_S1_X_MODIFY)
-#define bfin_write_MDMA0_S1_X_MODIFY(val) bfin_write16(MDMA0_S1_X_MODIFY, val)
-#define bfin_read_MDMA0_S1_Y_COUNT() bfin_read16(MDMA0_S1_Y_COUNT)
-#define bfin_write_MDMA0_S1_Y_COUNT(val) bfin_write16(MDMA0_S1_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_Y_MODIFY() bfin_read16(MDMA0_S1_Y_MODIFY)
-#define bfin_write_MDMA0_S1_Y_MODIFY(val) bfin_write16(MDMA0_S1_Y_MODIFY, val)
-#define bfin_read_MDMA0_S1_CURR_DESC_PTR() bfin_readPTR(MDMA0_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_CURR_ADDR() bfin_readPTR(MDMA0_S1_CURR_ADDR)
-#define bfin_write_MDMA0_S1_CURR_ADDR(val) bfin_writePTR(MDMA0_S1_CURR_ADDR, val)
-#define bfin_read_MDMA0_S1_IRQ_STATUS() bfin_read16(MDMA0_S1_IRQ_STATUS)
-#define bfin_write_MDMA0_S1_IRQ_STATUS(val) bfin_write16(MDMA0_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S1_PERIPHERAL_MAP() bfin_read16(MDMA0_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S1_CURR_X_COUNT() bfin_read16(MDMA0_S1_CURR_X_COUNT)
-#define bfin_write_MDMA0_S1_CURR_X_COUNT(val) bfin_write16(MDMA0_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S1_CURR_Y_COUNT() bfin_read16(MDMA0_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S1_CURR_Y_COUNT(val) bfin_write16(MDMA0_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_START_ADDR() bfin_readPTR(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_writePTR(MDMA1_D0_START_ADDR, val)
-#define bfin_read_MDMA1_D0_CONFIG() bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG, val)
-#define bfin_read_MDMA1_D0_X_COUNT() bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT, val)
-#define bfin_read_MDMA1_D0_X_MODIFY() bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY, val)
-#define bfin_read_MDMA1_D0_Y_COUNT() bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_Y_MODIFY() bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY, val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_readPTR(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_readPTR(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_writePTR(MDMA1_D0_CURR_ADDR, val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_START_ADDR() bfin_readPTR(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_writePTR(MDMA1_S0_START_ADDR, val)
-#define bfin_read_MDMA1_S0_CONFIG() bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG, val)
-#define bfin_read_MDMA1_S0_X_COUNT() bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT, val)
-#define bfin_read_MDMA1_S0_X_MODIFY() bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY, val)
-#define bfin_read_MDMA1_S0_Y_COUNT() bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_Y_MODIFY() bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY, val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_readPTR(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_readPTR(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_writePTR(MDMA1_S0_CURR_ADDR, val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_START_ADDR() bfin_readPTR(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_writePTR(MDMA1_D1_START_ADDR, val)
-#define bfin_read_MDMA1_D1_CONFIG() bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG, val)
-#define bfin_read_MDMA1_D1_X_COUNT() bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT, val)
-#define bfin_read_MDMA1_D1_X_MODIFY() bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY, val)
-#define bfin_read_MDMA1_D1_Y_COUNT() bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_Y_MODIFY() bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY, val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_readPTR(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_readPTR(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_writePTR(MDMA1_D1_CURR_ADDR, val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_START_ADDR() bfin_readPTR(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_writePTR(MDMA1_S1_START_ADDR, val)
-#define bfin_read_MDMA1_S1_CONFIG() bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG, val)
-#define bfin_read_MDMA1_S1_X_COUNT() bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT, val)
-#define bfin_read_MDMA1_S1_X_MODIFY() bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY, val)
-#define bfin_read_MDMA1_S1_Y_COUNT() bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_Y_MODIFY() bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY, val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_readPTR(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_readPTR(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_writePTR(MDMA1_S1_CURR_ADDR, val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val)
-
-#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA0_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA0_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA0_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA0_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA0_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA0_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA0_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA0_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA0_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA0_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA0_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA0_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA0_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA0_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA0_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA0_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA0_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA0_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA0_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA0_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA0_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA0_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA0_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA0_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA0_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA0_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA0_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA0_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA0_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA0_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA0_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA0_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA0_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA0_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA0_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA0_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA0_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA0_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA0_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA0_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA0_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA0_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA0_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA0_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA0_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA0_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA0_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA0_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA0_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA0_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA0_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA0_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA0_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA0_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA0_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA0_D1_START_ADDR(val)
-
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_readPTR(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_writePTR(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_readPTR(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_readPTR(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_writePTR(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_readPTR(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_writePTR(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_readPTR(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_readPTR(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_writePTR(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_readPTR(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_writePTR(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_readPTR(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_readPTR(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_writePTR(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_readPTR(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_writePTR(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_readPTR(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_readPTR(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_writePTR(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_readPTR(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D2_START_ADDR() bfin_readPTR(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val) bfin_writePTR(MDMA_D2_START_ADDR, val)
+#define bfin_read_MDMA_D2_CONFIG() bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG, val)
+#define bfin_read_MDMA_D2_X_COUNT() bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT, val)
+#define bfin_read_MDMA_D2_X_MODIFY() bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY, val)
+#define bfin_read_MDMA_D2_Y_COUNT() bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT, val)
+#define bfin_read_MDMA_D2_Y_MODIFY() bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY, val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_readPTR(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D2_CURR_ADDR() bfin_readPTR(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_writePTR(MDMA_D2_CURR_ADDR, val)
+#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS, val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_readPTR(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S2_START_ADDR() bfin_readPTR(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val) bfin_writePTR(MDMA_S2_START_ADDR, val)
+#define bfin_read_MDMA_S2_CONFIG() bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG, val)
+#define bfin_read_MDMA_S2_X_COUNT() bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT, val)
+#define bfin_read_MDMA_S2_X_MODIFY() bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY, val)
+#define bfin_read_MDMA_S2_Y_COUNT() bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT, val)
+#define bfin_read_MDMA_S2_Y_MODIFY() bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY, val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_readPTR(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S2_CURR_ADDR() bfin_readPTR(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_writePTR(MDMA_S2_CURR_ADDR, val)
+#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS, val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_readPTR(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D3_START_ADDR() bfin_readPTR(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val) bfin_writePTR(MDMA_D3_START_ADDR, val)
+#define bfin_read_MDMA_D3_CONFIG() bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG, val)
+#define bfin_read_MDMA_D3_X_COUNT() bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT, val)
+#define bfin_read_MDMA_D3_X_MODIFY() bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY, val)
+#define bfin_read_MDMA_D3_Y_COUNT() bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT, val)
+#define bfin_read_MDMA_D3_Y_MODIFY() bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY, val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_readPTR(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D3_CURR_ADDR() bfin_readPTR(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_writePTR(MDMA_D3_CURR_ADDR, val)
+#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS, val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_readPTR(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S3_START_ADDR() bfin_readPTR(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val) bfin_writePTR(MDMA_S3_START_ADDR, val)
+#define bfin_read_MDMA_S3_CONFIG() bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG, val)
+#define bfin_read_MDMA_S3_X_COUNT() bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT, val)
+#define bfin_read_MDMA_S3_X_MODIFY() bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY, val)
+#define bfin_read_MDMA_S3_Y_COUNT() bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT, val)
+#define bfin_read_MDMA_S3_Y_MODIFY() bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY, val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_readPTR(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S3_CURR_ADDR() bfin_readPTR(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_writePTR(MDMA_S3_CURR_ADDR, val)
+#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS, val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT, val)
#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
@@ -2024,7 +1957,4 @@
#define bfin_read_CAN_MB31_ID1() bfin_read16(CAN_MB31_ID1)
#define bfin_write_CAN_MB31_ID1(val) bfin_write16(CAN_MB31_ID1, val)
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
index 198c4bbc8e5..acc15f3aba3 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
@@ -1,6 +1,7 @@
-/* DO NOT EDIT THIS FILE
- * Automatically generated by generate-cdef-headers.xsl
- * DO NOT EDIT THIS FILE
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
*/
#ifndef _CDEF_BF539_H
@@ -9,7 +10,6 @@
/* Include MMRs Common to BF538 */
#include "cdefBF538.h"
-
#define bfin_read_MXVR_CONFIG() bfin_read16(MXVR_CONFIG)
#define bfin_write_MXVR_CONFIG(val) bfin_write16(MXVR_CONFIG, val)
#define bfin_read_MXVR_PLL_CTL_0() bfin_read32(MXVR_PLL_CTL_0)
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF538.h b/arch/blackfin/mach-bf538/include/mach/defBF538.h
new file mode 100644
index 00000000000..d27f81d6c4b
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/defBF538.h
@@ -0,0 +1,1825 @@
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#ifndef _DEF_BF538_H
+#define _DEF_BF538_H
+
+/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
+#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */
+#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */
+#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */
+#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */
+#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
+#define CHIPID 0xFFC00014 /* Chip ID Register */
+
+/* CHIPID Masks */
+#define CHIPID_VERSION 0xF0000000
+#define CHIPID_FAMILY 0x0FFFF000
+#define CHIPID_MANUFACTURE 0x00000FFE
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define SWRST 0xFFC00100 /* Software Reset Register (16-bit) */
+#define SYSCR 0xFFC00104 /* System Configuration register */
+#define SIC_RVECT 0xFFC00108
+#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
+#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
+#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
+#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
+#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
+#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
+#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
+#define SIC_IMASK1 0xFFC00128 /* Interrupt Mask Register 1 */
+#define SIC_ISR1 0xFFC0012C /* Interrupt Status Register 1 */
+#define SIC_IWR1 0xFFC00130 /* Interrupt Wakeup Register 1 */
+#define SIC_IAR4 0xFFC00134 /* Interrupt Assignment Register 4 */
+#define SIC_IAR5 0xFFC00138 /* Interrupt Assignment Register 5 */
+#define SIC_IAR6 0xFFC0013C /* Interrupt Assignment Register 6 */
+
+
+/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
+#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
+#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
+#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
+
+
+/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define RTC_STAT 0xFFC00300 /* RTC Status Register */
+#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
+#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
+#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
+#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
+#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
+#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Register (alternate macro) */
+
+
+/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
+#define UART0_THR 0xFFC00400 /* Transmit Holding register */
+#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
+#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
+#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
+#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
+#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
+#define UART0_LCR 0xFFC0040C /* Line Control Register */
+#define UART0_MCR 0xFFC00410 /* Modem Control Register */
+#define UART0_LSR 0xFFC00414 /* Line Status Register */
+#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
+#define UART0_GCTL 0xFFC00424 /* Global Control Register */
+
+
+/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
+
+#define SPI0_CTL 0xFFC00500 /* SPI0 Control Register */
+#define SPI0_FLG 0xFFC00504 /* SPI0 Flag register */
+#define SPI0_STAT 0xFFC00508 /* SPI0 Status register */
+#define SPI0_TDBR 0xFFC0050C /* SPI0 Transmit Data Buffer Register */
+#define SPI0_RDBR 0xFFC00510 /* SPI0 Receive Data Buffer Register */
+#define SPI0_BAUD 0xFFC00514 /* SPI0 Baud rate Register */
+#define SPI0_SHADOW 0xFFC00518 /* SPI0_RDBR Shadow Register */
+#define SPI0_REGBASE SPI0_CTL
+
+
+/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
+#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
+#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
+#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
+#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
+
+#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
+#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
+#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
+#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
+
+#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
+#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
+#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
+#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
+
+#define TIMER_ENABLE 0xFFC00640 /* Timer Enable Register */
+#define TIMER_DISABLE 0xFFC00644 /* Timer Disable Register */
+#define TIMER_STATUS 0xFFC00648 /* Timer Status Register */
+
+
+/* Programmable Flags (0xFFC00700 - 0xFFC007FF) */
+#define FIO_FLAG_D 0xFFC00700 /* Flag Mask to directly specify state of pins */
+#define FIO_FLAG_C 0xFFC00704 /* Peripheral Interrupt Flag Register (clear) */
+#define FIO_FLAG_S 0xFFC00708 /* Peripheral Interrupt Flag Register (set) */
+#define FIO_FLAG_T 0xFFC0070C /* Flag Mask to directly toggle state of pins */
+#define FIO_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Register (set directly) */
+#define FIO_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Register (clear) */
+#define FIO_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Register (set) */
+#define FIO_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Register (toggle) */
+#define FIO_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Register (set directly) */
+#define FIO_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Register (clear) */
+#define FIO_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Register (set) */
+#define FIO_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Register (toggle) */
+#define FIO_DIR 0xFFC00730 /* Peripheral Flag Direction Register */
+#define FIO_POLAR 0xFFC00734 /* Flag Source Polarity Register */
+#define FIO_EDGE 0xFFC00738 /* Flag Source Sensitivity Register */
+#define FIO_BOTH 0xFFC0073C /* Flag Set on BOTH Edges Register */
+#define FIO_INEN 0xFFC00740 /* Flag Input Enable Register */
+
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
+#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
+#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
+#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
+#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
+#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
+#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
+#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
+#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
+#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
+#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
+#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
+#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
+#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
+#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
+
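+/*
+ * Usage sketch: SPORT bit clocks are derived from SCLK as
+ * TSCLK = SCLK / (2 * (TCLKDIV + 1)), with one frame sync every
+ * (TFSDIV + 1) TSCLK cycles; the divider math below just inverts that.
+ * Accessors and types as in the sketches above.
+ */
+static inline void sport0_tx_clock_sketch(u32 sclk_hz, u32 bitclk_hz, u16 frame_bits)
+{
+	bfin_write16(SPORT0_TCLKDIV, (u16)(sclk_hz / (2 * bitclk_hz) - 1));
+	bfin_write16(SPORT0_TFSDIV, frame_bits - 1);	/* one sync per frame */
+}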
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
+#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
+#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
+#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
+#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
+#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
+#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
+#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
+#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
+#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
+#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
+#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
+#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
+#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
+#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+/* Asynchronous Memory Controller */
+#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
+#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
+#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
+
+/* SDRAM Controller */
+#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
+#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
+#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
+#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
+
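+/*
+ * Usage sketch: EBIU_SDRRC holds the refresh interval in SCLK cycles.
+ * The arithmetic below follows the conventional
+ * RDIV = (SCLK * tREF / rows) - (tRAS + tRP) relationship; treat it as an
+ * assumption to be checked against the HRM for a given part.
+ */
+static inline void sdram_set_refresh_sketch(u32 sclk_khz, u32 tref_ms, u32 rows, u16 tras_trp)
+{
+	u32 rdiv = sclk_khz * tref_ms / rows - tras_trp;
+	bfin_write16(EBIU_SDRRC, (u16)rdiv);	/* refresh every RDIV SCLK cycles */
+}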
+
+
+/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
+
+#define DMAC0_TC_PER 0xFFC00B0C /* DMA Controller 0 Traffic Control Periods Register */
+#define DMAC0_TC_CNT 0xFFC00B10 /* DMA Controller 0 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 0 (0xFFC00C00 - 0xFFC00FFF) */
+
+#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
+#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
+#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
+#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
+#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
+#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
+#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
+#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
+#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
+#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
+#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
+#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
+#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
+
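+/*
+ * Usage sketch: a one-shot, register-based (non-descriptor) receive on DMA
+ * channel 0. The DMAx_CONFIG bit values are illustrative (the DMAEN, WNR,
+ * WDSIZE_16 and DI_EN encodings live in the HRM); accessors as above.
+ */
+static inline void dma0_recv_sketch(void *buf, u16 nwords)
+{
+	bfin_write32(DMA0_START_ADDR, (u32)buf);
+	bfin_write16(DMA0_X_COUNT, nwords);
+	bfin_write16(DMA0_X_MODIFY, 2);		/* 16-bit element stride */
+	bfin_write16(DMA0_CONFIG, 0x0087);	/* DI_EN | WDSIZE_16 | WNR | DMAEN (illustrative) */
+}
+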
+#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
+#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
+#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
+#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
+#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
+#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
+#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
+#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
+#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
+#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
+#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
+#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
+#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
+
+#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
+#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
+#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
+#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
+#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
+#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
+#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
+#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
+#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
+#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
+#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
+#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
+#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
+
+#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
+#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
+#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
+#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
+#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
+#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
+#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
+#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
+#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
+#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
+#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
+#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
+#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
+
+#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
+#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
+#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
+#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
+#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
+#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
+#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
+#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
+#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
+#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
+#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
+#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
+#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
+
+#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
+#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
+#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
+#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
+#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
+#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
+#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
+#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
+#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
+#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
+#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
+#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
+#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
+
+#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
+#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
+#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
+#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
+#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
+#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
+#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
+#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
+#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
+#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
+#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
+#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
+#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
+
+#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
+#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
+#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
+#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
+#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
+#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
+#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
+#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
+#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
+#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
+#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
+#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
+#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
+
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00E00 /* MemDMA0 Stream 0 Destination Next Descriptor Pointer Register */
+#define MDMA_D0_START_ADDR 0xFFC00E04 /* MemDMA0 Stream 0 Destination Start Address Register */
+#define MDMA_D0_CONFIG 0xFFC00E08 /* MemDMA0 Stream 0 Destination Configuration Register */
+#define MDMA_D0_X_COUNT 0xFFC00E10 /* MemDMA0 Stream 0 Destination X Count Register */
+#define MDMA_D0_X_MODIFY 0xFFC00E14 /* MemDMA0 Stream 0 Destination X Modify Register */
+#define MDMA_D0_Y_COUNT 0xFFC00E18 /* MemDMA0 Stream 0 Destination Y Count Register */
+#define MDMA_D0_Y_MODIFY 0xFFC00E1C /* MemDMA0 Stream 0 Destination Y Modify Register */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00E20 /* MemDMA0 Stream 0 Destination Current Descriptor Pointer Register */
+#define MDMA_D0_CURR_ADDR 0xFFC00E24 /* MemDMA0 Stream 0 Destination Current Address Register */
+#define MDMA_D0_IRQ_STATUS 0xFFC00E28 /* MemDMA0 Stream 0 Destination Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00E2C /* MemDMA0 Stream 0 Destination Peripheral Map Register */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00E30 /* MemDMA0 Stream 0 Destination Current X Count Register */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00E38 /* MemDMA0 Stream 0 Destination Current Y Count Register */
+
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00E40 /* MemDMA0 Stream 0 Source Next Descriptor Pointer Register */
+#define MDMA_S0_START_ADDR 0xFFC00E44 /* MemDMA0 Stream 0 Source Start Address Register */
+#define MDMA_S0_CONFIG 0xFFC00E48 /* MemDMA0 Stream 0 Source Configuration Register */
+#define MDMA_S0_X_COUNT 0xFFC00E50 /* MemDMA0 Stream 0 Source X Count Register */
+#define MDMA_S0_X_MODIFY 0xFFC00E54 /* MemDMA0 Stream 0 Source X Modify Register */
+#define MDMA_S0_Y_COUNT 0xFFC00E58 /* MemDMA0 Stream 0 Source Y Count Register */
+#define MDMA_S0_Y_MODIFY 0xFFC00E5C /* MemDMA0 Stream 0 Source Y Modify Register */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00E60 /* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
+#define MDMA_S0_CURR_ADDR 0xFFC00E64 /* MemDMA0 Stream 0 Source Current Address Register */
+#define MDMA_S0_IRQ_STATUS 0xFFC00E68 /* MemDMA0 Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00E6C /* MemDMA0 Stream 0 Source Peripheral Map Register */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00E70 /* MemDMA0 Stream 0 Source Current X Count Register */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00E78 /* MemDMA0 Stream 0 Source Current Y Count Register */
+
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00E80 /* MemDMA0 Stream 1 Destination Next Descriptor Pointer Register */
+#define MDMA_D1_START_ADDR 0xFFC00E84 /* MemDMA0 Stream 1 Destination Start Address Register */
+#define MDMA_D1_CONFIG 0xFFC00E88 /* MemDMA0 Stream 1 Destination Configuration Register */
+#define MDMA_D1_X_COUNT 0xFFC00E90 /* MemDMA0 Stream 1 Destination X Count Register */
+#define MDMA_D1_X_MODIFY 0xFFC00E94 /* MemDMA0 Stream 1 Destination X Modify Register */
+#define MDMA_D1_Y_COUNT 0xFFC00E98 /* MemDMA0 Stream 1 Destination Y Count Register */
+#define MDMA_D1_Y_MODIFY 0xFFC00E9C /* MemDMA0 Stream 1 Destination Y Modify Register */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00EA0 /* MemDMA0 Stream 1 Destination Current Descriptor Pointer Register */
+#define MDMA_D1_CURR_ADDR 0xFFC00EA4 /* MemDMA0 Stream 1 Destination Current Address Register */
+#define MDMA_D1_IRQ_STATUS 0xFFC00EA8 /* MemDMA0 Stream 1 Destination Interrupt/Status Register */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00EAC /* MemDMA0 Stream 1 Destination Peripheral Map Register */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00EB0 /* MemDMA0 Stream 1 Destination Current X Count Register */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00EB8 /* MemDMA0 Stream 1 Destination Current Y Count Register */
+
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00EC0 /* MemDMA0 Stream 1 Source Next Descriptor Pointer Register */
+#define MDMA_S1_START_ADDR 0xFFC00EC4 /* MemDMA0 Stream 1 Source Start Address Register */
+#define MDMA_S1_CONFIG 0xFFC00EC8 /* MemDMA0 Stream 1 Source Configuration Register */
+#define MDMA_S1_X_COUNT 0xFFC00ED0 /* MemDMA0 Stream 1 Source X Count Register */
+#define MDMA_S1_X_MODIFY 0xFFC00ED4 /* MemDMA0 Stream 1 Source X Modify Register */
+#define MDMA_S1_Y_COUNT 0xFFC00ED8 /* MemDMA0 Stream 1 Source Y Count Register */
+#define MDMA_S1_Y_MODIFY 0xFFC00EDC /* MemDMA0 Stream 1 Source Y Modify Register */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00EE0 /* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
+#define MDMA_S1_CURR_ADDR 0xFFC00EE4 /* MemDMA0 Stream 1 Source Current Address Register */
+#define MDMA_S1_IRQ_STATUS 0xFFC00EE8 /* MemDMA0 Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00EEC /* MemDMA0 Stream 1 Source Peripheral Map Register */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */
+
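+/*
+ * Usage sketch: a memory-to-memory copy pairs a source stream with its
+ * destination stream (here MDMA_S0/MDMA_D0); the destination is started
+ * last because its DMAEN kicks off the transfer. Bit values illustrative.
+ */
+static inline void mdma0_copy_sketch(void *dst, const void *src, u16 nbytes)
+{
+	bfin_write32(MDMA_S0_START_ADDR, (u32)src);
+	bfin_write16(MDMA_S0_X_COUNT, nbytes);
+	bfin_write16(MDMA_S0_X_MODIFY, 1);		/* byte-wide strides */
+	bfin_write32(MDMA_D0_START_ADDR, (u32)dst);
+	bfin_write16(MDMA_D0_X_COUNT, nbytes);
+	bfin_write16(MDMA_D0_X_MODIFY, 1);
+	bfin_write16(MDMA_S0_CONFIG, 0x0001);		/* DMAEN, memory read */
+	bfin_write16(MDMA_D0_CONFIG, 0x0003);		/* DMAEN | WNR, memory write */
+	while (!(bfin_read16(MDMA_D0_IRQ_STATUS) & 0x0001))
+		;					/* poll DMA_DONE (bit 0) */
+	bfin_write16(MDMA_D0_IRQ_STATUS, 0x0001);	/* W1C the done flag */
+}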
+
+/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
+#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
+#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
+#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
+#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
+#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
+
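+/*
+ * Usage sketch: PPI_COUNT is programmed with the samples per line minus
+ * one and PPI_FRAME with the lines per frame; PPI_CONTROL is left out
+ * here since its bits are mode-specific.
+ */
+static inline void ppi_geometry_sketch(u16 samples_per_line, u16 lines)
+{
+	bfin_write16(PPI_COUNT, samples_per_line - 1);
+	bfin_write16(PPI_DELAY, 0);		/* no delay after frame sync */
+	bfin_write16(PPI_FRAME, lines);
+}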
+
+/* Two-Wire Interface 0 (0xFFC01400 - 0xFFC014FF) */
+#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
+#define TWI0_CONTROL 0xFFC01404 /* TWI0 Control Register (enable and internal time reference prescaler) */
+#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
+#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
+#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
+#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
+#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
+#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
+#define TWI0_INT_STAT 0xFFC01420 /* TWI0 Interrupt Status Register */
+#define TWI0_INT_MASK 0xFFC01424 /* TWI0 Interrupt Mask Register */
+#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
+#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
+#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
+#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
+#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
+#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
+
+#define TWI0_REGBASE TWI0_CLKDIV
+
+/* the following are for backwards compatibility */
+#define TWI0_PRESCALE TWI0_CONTROL
+#define TWI0_INT_SRC TWI0_INT_STAT
+#define TWI0_INT_ENABLE TWI0_INT_MASK
+
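+/*
+ * Usage sketch: the compatibility names are pure aliases, so legacy code
+ * writing TWI0_PRESCALE still lands on TWI0_CONTROL. The clock setup below
+ * assumes the usual Blackfin TWI scheme of a 10 MHz internal time
+ * reference (PRESCALE = SCLK / 10 MHz) with the SCL high/low counts
+ * packed into TWI0_CLKDIV; the TWI_ENA bit value is illustrative.
+ */
+static inline void twi0_set_clock_sketch(u32 sclk_hz, u32 scl_hz)
+{
+	u16 prescale = sclk_hz / 10000000;
+	u16 clkdiv = 10000000 / scl_hz;		/* SCL period in 10 MHz ticks */
+
+	bfin_write16(TWI0_CONTROL, prescale | 0x0080);	/* prescale + TWI_ENA (illustrative) */
+	bfin_write16(TWI0_CLKDIV, ((clkdiv / 2) << 8) | (clkdiv - clkdiv / 2));
+}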
+
+/* General-Purpose Ports (0xFFC01500 - 0xFFC015FF) */
+
+/* GPIO Port C Register Names */
+#define PORTCIO_FER 0xFFC01500 /* GPIO Pin Port C Configuration Register */
+#define PORTCIO 0xFFC01510 /* GPIO Pin Port C Data Register */
+#define PORTCIO_CLEAR 0xFFC01520 /* Clear GPIO Pin Port C Register */
+#define PORTCIO_SET 0xFFC01530 /* Set GPIO Pin Port C Register */
+#define PORTCIO_TOGGLE 0xFFC01540 /* Toggle GPIO Pin Port C Register */
+#define PORTCIO_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
+#define PORTCIO_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
+
+/* GPIO Port D Register Names */
+#define PORTDIO_FER 0xFFC01504 /* GPIO Pin Port D Configuration Register */
+#define PORTDIO 0xFFC01514 /* GPIO Pin Port D Data Register */
+#define PORTDIO_CLEAR 0xFFC01524 /* Clear GPIO Pin Port D Register */
+#define PORTDIO_SET 0xFFC01534 /* Set GPIO Pin Port D Register */
+#define PORTDIO_TOGGLE 0xFFC01544 /* Toggle GPIO Pin Port D Register */
+#define PORTDIO_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
+#define PORTDIO_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
+
+/* GPIO Port E Register Names */
+#define PORTEIO_FER 0xFFC01508 /* GPIO Pin Port E Configuration Register */
+#define PORTEIO 0xFFC01518 /* GPIO Pin Port E Data Register */
+#define PORTEIO_CLEAR 0xFFC01528 /* Clear GPIO Pin Port E Register */
+#define PORTEIO_SET 0xFFC01538 /* Set GPIO Pin Port E Register */
+#define PORTEIO_TOGGLE 0xFFC01548 /* Toggle GPIO Pin Port E Register */
+#define PORTEIO_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
+#define PORTEIO_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
+
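+/*
+ * Layout note with a sketch: ports C/D/E interleave with a 0x4 stride
+ * between ports and a 0x10 stride between register types (FER, data,
+ * clear, set, toggle, dir, inen). A hypothetical helper, not part of the
+ * header, that exploits this (port 0 = C, 1 = D, 2 = E; reg 0 = FER ...
+ * 6 = INEN):
+ */
+#define PORTIO_REG_SKETCH(port, reg) (0xFFC01500 + (reg) * 0x10 + (port) * 0x4)
+/* e.g. PORTIO_REG_SKETCH(1, 0) == PORTDIO_FER == 0xFFC01504 */
+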
+/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
+
+#define DMAC1_TC_PER 0xFFC01B0C /* DMA Controller 1 Traffic Control Periods Register */
+#define DMAC1_TC_CNT 0xFFC01B10 /* DMA Controller 1 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 1 (0xFFC01C00 - 0xFFC01FFF) */
+#define DMA8_NEXT_DESC_PTR 0xFFC01C00 /* DMA Channel 8 Next Descriptor Pointer Register */
+#define DMA8_START_ADDR 0xFFC01C04 /* DMA Channel 8 Start Address Register */
+#define DMA8_CONFIG 0xFFC01C08 /* DMA Channel 8 Configuration Register */
+#define DMA8_X_COUNT 0xFFC01C10 /* DMA Channel 8 X Count Register */
+#define DMA8_X_MODIFY 0xFFC01C14 /* DMA Channel 8 X Modify Register */
+#define DMA8_Y_COUNT 0xFFC01C18 /* DMA Channel 8 Y Count Register */
+#define DMA8_Y_MODIFY 0xFFC01C1C /* DMA Channel 8 Y Modify Register */
+#define DMA8_CURR_DESC_PTR 0xFFC01C20 /* DMA Channel 8 Current Descriptor Pointer Register */
+#define DMA8_CURR_ADDR 0xFFC01C24 /* DMA Channel 8 Current Address Register */
+#define DMA8_IRQ_STATUS 0xFFC01C28 /* DMA Channel 8 Interrupt/Status Register */
+#define DMA8_PERIPHERAL_MAP 0xFFC01C2C /* DMA Channel 8 Peripheral Map Register */
+#define DMA8_CURR_X_COUNT 0xFFC01C30 /* DMA Channel 8 Current X Count Register */
+#define DMA8_CURR_Y_COUNT 0xFFC01C38 /* DMA Channel 8 Current Y Count Register */
+
+#define DMA9_NEXT_DESC_PTR 0xFFC01C40 /* DMA Channel 9 Next Descriptor Pointer Register */
+#define DMA9_START_ADDR 0xFFC01C44 /* DMA Channel 9 Start Address Register */
+#define DMA9_CONFIG 0xFFC01C48 /* DMA Channel 9 Configuration Register */
+#define DMA9_X_COUNT 0xFFC01C50 /* DMA Channel 9 X Count Register */
+#define DMA9_X_MODIFY 0xFFC01C54 /* DMA Channel 9 X Modify Register */
+#define DMA9_Y_COUNT 0xFFC01C58 /* DMA Channel 9 Y Count Register */
+#define DMA9_Y_MODIFY 0xFFC01C5C /* DMA Channel 9 Y Modify Register */
+#define DMA9_CURR_DESC_PTR 0xFFC01C60 /* DMA Channel 9 Current Descriptor Pointer Register */
+#define DMA9_CURR_ADDR 0xFFC01C64 /* DMA Channel 9 Current Address Register */
+#define DMA9_IRQ_STATUS 0xFFC01C68 /* DMA Channel 9 Interrupt/Status Register */
+#define DMA9_PERIPHERAL_MAP 0xFFC01C6C /* DMA Channel 9 Peripheral Map Register */
+#define DMA9_CURR_X_COUNT 0xFFC01C70 /* DMA Channel 9 Current X Count Register */
+#define DMA9_CURR_Y_COUNT 0xFFC01C78 /* DMA Channel 9 Current Y Count Register */
+
+#define DMA10_NEXT_DESC_PTR 0xFFC01C80 /* DMA Channel 10 Next Descriptor Pointer Register */
+#define DMA10_START_ADDR 0xFFC01C84 /* DMA Channel 10 Start Address Register */
+#define DMA10_CONFIG 0xFFC01C88 /* DMA Channel 10 Configuration Register */
+#define DMA10_X_COUNT 0xFFC01C90 /* DMA Channel 10 X Count Register */
+#define DMA10_X_MODIFY 0xFFC01C94 /* DMA Channel 10 X Modify Register */
+#define DMA10_Y_COUNT 0xFFC01C98 /* DMA Channel 10 Y Count Register */
+#define DMA10_Y_MODIFY 0xFFC01C9C /* DMA Channel 10 Y Modify Register */
+#define DMA10_CURR_DESC_PTR 0xFFC01CA0 /* DMA Channel 10 Current Descriptor Pointer Register */
+#define DMA10_CURR_ADDR 0xFFC01CA4 /* DMA Channel 10 Current Address Register */
+#define DMA10_IRQ_STATUS 0xFFC01CA8 /* DMA Channel 10 Interrupt/Status Register */
+#define DMA10_PERIPHERAL_MAP 0xFFC01CAC /* DMA Channel 10 Peripheral Map Register */
+#define DMA10_CURR_X_COUNT 0xFFC01CB0 /* DMA Channel 10 Current X Count Register */
+#define DMA10_CURR_Y_COUNT 0xFFC01CB8 /* DMA Channel 10 Current Y Count Register */
+
+#define DMA11_NEXT_DESC_PTR 0xFFC01CC0 /* DMA Channel 11 Next Descriptor Pointer Register */
+#define DMA11_START_ADDR 0xFFC01CC4 /* DMA Channel 11 Start Address Register */
+#define DMA11_CONFIG 0xFFC01CC8 /* DMA Channel 11 Configuration Register */
+#define DMA11_X_COUNT 0xFFC01CD0 /* DMA Channel 11 X Count Register */
+#define DMA11_X_MODIFY 0xFFC01CD4 /* DMA Channel 11 X Modify Register */
+#define DMA11_Y_COUNT 0xFFC01CD8 /* DMA Channel 11 Y Count Register */
+#define DMA11_Y_MODIFY 0xFFC01CDC /* DMA Channel 11 Y Modify Register */
+#define DMA11_CURR_DESC_PTR 0xFFC01CE0 /* DMA Channel 11 Current Descriptor Pointer Register */
+#define DMA11_CURR_ADDR 0xFFC01CE4 /* DMA Channel 11 Current Address Register */
+#define DMA11_IRQ_STATUS 0xFFC01CE8 /* DMA Channel 11 Interrupt/Status Register */
+#define DMA11_PERIPHERAL_MAP 0xFFC01CEC /* DMA Channel 11 Peripheral Map Register */
+#define DMA11_CURR_X_COUNT 0xFFC01CF0 /* DMA Channel 11 Current X Count Register */
+#define DMA11_CURR_Y_COUNT 0xFFC01CF8 /* DMA Channel 11 Current Y Count Register */
+
+#define DMA12_NEXT_DESC_PTR 0xFFC01D00 /* DMA Channel 12 Next Descriptor Pointer Register */
+#define DMA12_START_ADDR 0xFFC01D04 /* DMA Channel 12 Start Address Register */
+#define DMA12_CONFIG 0xFFC01D08 /* DMA Channel 12 Configuration Register */
+#define DMA12_X_COUNT 0xFFC01D10 /* DMA Channel 12 X Count Register */
+#define DMA12_X_MODIFY 0xFFC01D14 /* DMA Channel 12 X Modify Register */
+#define DMA12_Y_COUNT 0xFFC01D18 /* DMA Channel 12 Y Count Register */
+#define DMA12_Y_MODIFY 0xFFC01D1C /* DMA Channel 12 Y Modify Register */
+#define DMA12_CURR_DESC_PTR 0xFFC01D20 /* DMA Channel 12 Current Descriptor Pointer Register */
+#define DMA12_CURR_ADDR 0xFFC01D24 /* DMA Channel 12 Current Address Register */
+#define DMA12_IRQ_STATUS 0xFFC01D28 /* DMA Channel 12 Interrupt/Status Register */
+#define DMA12_PERIPHERAL_MAP 0xFFC01D2C /* DMA Channel 12 Peripheral Map Register */
+#define DMA12_CURR_X_COUNT 0xFFC01D30 /* DMA Channel 12 Current X Count Register */
+#define DMA12_CURR_Y_COUNT 0xFFC01D38 /* DMA Channel 12 Current Y Count Register */
+
+#define DMA13_NEXT_DESC_PTR 0xFFC01D40 /* DMA Channel 13 Next Descriptor Pointer Register */
+#define DMA13_START_ADDR 0xFFC01D44 /* DMA Channel 13 Start Address Register */
+#define DMA13_CONFIG 0xFFC01D48 /* DMA Channel 13 Configuration Register */
+#define DMA13_X_COUNT 0xFFC01D50 /* DMA Channel 13 X Count Register */
+#define DMA13_X_MODIFY 0xFFC01D54 /* DMA Channel 13 X Modify Register */
+#define DMA13_Y_COUNT 0xFFC01D58 /* DMA Channel 13 Y Count Register */
+#define DMA13_Y_MODIFY 0xFFC01D5C /* DMA Channel 13 Y Modify Register */
+#define DMA13_CURR_DESC_PTR 0xFFC01D60 /* DMA Channel 13 Current Descriptor Pointer Register */
+#define DMA13_CURR_ADDR 0xFFC01D64 /* DMA Channel 13 Current Address Register */
+#define DMA13_IRQ_STATUS 0xFFC01D68 /* DMA Channel 13 Interrupt/Status Register */
+#define DMA13_PERIPHERAL_MAP 0xFFC01D6C /* DMA Channel 13 Peripheral Map Register */
+#define DMA13_CURR_X_COUNT 0xFFC01D70 /* DMA Channel 13 Current X Count Register */
+#define DMA13_CURR_Y_COUNT 0xFFC01D78 /* DMA Channel 13 Current Y Count Register */
+
+#define DMA14_NEXT_DESC_PTR 0xFFC01D80 /* DMA Channel 14 Next Descriptor Pointer Register */
+#define DMA14_START_ADDR 0xFFC01D84 /* DMA Channel 14 Start Address Register */
+#define DMA14_CONFIG 0xFFC01D88 /* DMA Channel 14 Configuration Register */
+#define DMA14_X_COUNT 0xFFC01D90 /* DMA Channel 14 X Count Register */
+#define DMA14_X_MODIFY 0xFFC01D94 /* DMA Channel 14 X Modify Register */
+#define DMA14_Y_COUNT 0xFFC01D98 /* DMA Channel 14 Y Count Register */
+#define DMA14_Y_MODIFY 0xFFC01D9C /* DMA Channel 14 Y Modify Register */
+#define DMA14_CURR_DESC_PTR 0xFFC01DA0 /* DMA Channel 14 Current Descriptor Pointer Register */
+#define DMA14_CURR_ADDR 0xFFC01DA4 /* DMA Channel 14 Current Address Register */
+#define DMA14_IRQ_STATUS 0xFFC01DA8 /* DMA Channel 14 Interrupt/Status Register */
+#define DMA14_PERIPHERAL_MAP 0xFFC01DAC /* DMA Channel 14 Peripheral Map Register */
+#define DMA14_CURR_X_COUNT 0xFFC01DB0 /* DMA Channel 14 Current X Count Register */
+#define DMA14_CURR_Y_COUNT 0xFFC01DB8 /* DMA Channel 14 Current Y Count Register */
+
+#define DMA15_NEXT_DESC_PTR 0xFFC01DC0 /* DMA Channel 15 Next Descriptor Pointer Register */
+#define DMA15_START_ADDR 0xFFC01DC4 /* DMA Channel 15 Start Address Register */
+#define DMA15_CONFIG 0xFFC01DC8 /* DMA Channel 15 Configuration Register */
+#define DMA15_X_COUNT 0xFFC01DD0 /* DMA Channel 15 X Count Register */
+#define DMA15_X_MODIFY 0xFFC01DD4 /* DMA Channel 15 X Modify Register */
+#define DMA15_Y_COUNT 0xFFC01DD8 /* DMA Channel 15 Y Count Register */
+#define DMA15_Y_MODIFY 0xFFC01DDC /* DMA Channel 15 Y Modify Register */
+#define DMA15_CURR_DESC_PTR 0xFFC01DE0 /* DMA Channel 15 Current Descriptor Pointer Register */
+#define DMA15_CURR_ADDR 0xFFC01DE4 /* DMA Channel 15 Current Address Register */
+#define DMA15_IRQ_STATUS 0xFFC01DE8 /* DMA Channel 15 Interrupt/Status Register */
+#define DMA15_PERIPHERAL_MAP 0xFFC01DEC /* DMA Channel 15 Peripheral Map Register */
+#define DMA15_CURR_X_COUNT 0xFFC01DF0 /* DMA Channel 15 Current X Count Register */
+#define DMA15_CURR_Y_COUNT 0xFFC01DF8 /* DMA Channel 15 Current Y Count Register */
+
+#define DMA16_NEXT_DESC_PTR 0xFFC01E00 /* DMA Channel 16 Next Descriptor Pointer Register */
+#define DMA16_START_ADDR 0xFFC01E04 /* DMA Channel 16 Start Address Register */
+#define DMA16_CONFIG 0xFFC01E08 /* DMA Channel 16 Configuration Register */
+#define DMA16_X_COUNT 0xFFC01E10 /* DMA Channel 16 X Count Register */
+#define DMA16_X_MODIFY 0xFFC01E14 /* DMA Channel 16 X Modify Register */
+#define DMA16_Y_COUNT 0xFFC01E18 /* DMA Channel 16 Y Count Register */
+#define DMA16_Y_MODIFY 0xFFC01E1C /* DMA Channel 16 Y Modify Register */
+#define DMA16_CURR_DESC_PTR 0xFFC01E20 /* DMA Channel 16 Current Descriptor Pointer Register */
+#define DMA16_CURR_ADDR 0xFFC01E24 /* DMA Channel 16 Current Address Register */
+#define DMA16_IRQ_STATUS 0xFFC01E28 /* DMA Channel 16 Interrupt/Status Register */
+#define DMA16_PERIPHERAL_MAP 0xFFC01E2C /* DMA Channel 16 Peripheral Map Register */
+#define DMA16_CURR_X_COUNT 0xFFC01E30 /* DMA Channel 16 Current X Count Register */
+#define DMA16_CURR_Y_COUNT 0xFFC01E38 /* DMA Channel 16 Current Y Count Register */
+
+#define DMA17_NEXT_DESC_PTR 0xFFC01E40 /* DMA Channel 17 Next Descriptor Pointer Register */
+#define DMA17_START_ADDR 0xFFC01E44 /* DMA Channel 17 Start Address Register */
+#define DMA17_CONFIG 0xFFC01E48 /* DMA Channel 17 Configuration Register */
+#define DMA17_X_COUNT 0xFFC01E50 /* DMA Channel 17 X Count Register */
+#define DMA17_X_MODIFY 0xFFC01E54 /* DMA Channel 17 X Modify Register */
+#define DMA17_Y_COUNT 0xFFC01E58 /* DMA Channel 17 Y Count Register */
+#define DMA17_Y_MODIFY 0xFFC01E5C /* DMA Channel 17 Y Modify Register */
+#define DMA17_CURR_DESC_PTR 0xFFC01E60 /* DMA Channel 17 Current Descriptor Pointer Register */
+#define DMA17_CURR_ADDR 0xFFC01E64 /* DMA Channel 17 Current Address Register */
+#define DMA17_IRQ_STATUS 0xFFC01E68 /* DMA Channel 17 Interrupt/Status Register */
+#define DMA17_PERIPHERAL_MAP 0xFFC01E6C /* DMA Channel 17 Peripheral Map Register */
+#define DMA17_CURR_X_COUNT 0xFFC01E70 /* DMA Channel 17 Current X Count Register */
+#define DMA17_CURR_Y_COUNT 0xFFC01E78 /* DMA Channel 17 Current Y Count Register */
+
+#define DMA18_NEXT_DESC_PTR 0xFFC01E80 /* DMA Channel 18 Next Descriptor Pointer Register */
+#define DMA18_START_ADDR 0xFFC01E84 /* DMA Channel 18 Start Address Register */
+#define DMA18_CONFIG 0xFFC01E88 /* DMA Channel 18 Configuration Register */
+#define DMA18_X_COUNT 0xFFC01E90 /* DMA Channel 18 X Count Register */
+#define DMA18_X_MODIFY 0xFFC01E94 /* DMA Channel 18 X Modify Register */
+#define DMA18_Y_COUNT 0xFFC01E98 /* DMA Channel 18 Y Count Register */
+#define DMA18_Y_MODIFY 0xFFC01E9C /* DMA Channel 18 Y Modify Register */
+#define DMA18_CURR_DESC_PTR 0xFFC01EA0 /* DMA Channel 18 Current Descriptor Pointer Register */
+#define DMA18_CURR_ADDR 0xFFC01EA4 /* DMA Channel 18 Current Address Register */
+#define DMA18_IRQ_STATUS 0xFFC01EA8 /* DMA Channel 18 Interrupt/Status Register */
+#define DMA18_PERIPHERAL_MAP 0xFFC01EAC /* DMA Channel 18 Peripheral Map Register */
+#define DMA18_CURR_X_COUNT 0xFFC01EB0 /* DMA Channel 18 Current X Count Register */
+#define DMA18_CURR_Y_COUNT 0xFFC01EB8 /* DMA Channel 18 Current Y Count Register */
+
+#define DMA19_NEXT_DESC_PTR 0xFFC01EC0 /* DMA Channel 19 Next Descriptor Pointer Register */
+#define DMA19_START_ADDR 0xFFC01EC4 /* DMA Channel 19 Start Address Register */
+#define DMA19_CONFIG 0xFFC01EC8 /* DMA Channel 19 Configuration Register */
+#define DMA19_X_COUNT 0xFFC01ED0 /* DMA Channel 19 X Count Register */
+#define DMA19_X_MODIFY 0xFFC01ED4 /* DMA Channel 19 X Modify Register */
+#define DMA19_Y_COUNT 0xFFC01ED8 /* DMA Channel 19 Y Count Register */
+#define DMA19_Y_MODIFY 0xFFC01EDC /* DMA Channel 19 Y Modify Register */
+#define DMA19_CURR_DESC_PTR 0xFFC01EE0 /* DMA Channel 19 Current Descriptor Pointer Register */
+#define DMA19_CURR_ADDR 0xFFC01EE4 /* DMA Channel 19 Current Address Register */
+#define DMA19_IRQ_STATUS 0xFFC01EE8 /* DMA Channel 19 Interrupt/Status Register */
+#define DMA19_PERIPHERAL_MAP 0xFFC01EEC /* DMA Channel 19 Peripheral Map Register */
+#define DMA19_CURR_X_COUNT 0xFFC01EF0 /* DMA Channel 19 Current X Count Register */
+#define DMA19_CURR_Y_COUNT 0xFFC01EF8 /* DMA Channel 19 Current Y Count Register */
+
+#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00 /* MemDMA1 Stream 0 Destination Next Descriptor Pointer Register */
+#define MDMA_D2_START_ADDR 0xFFC01F04 /* MemDMA1 Stream 0 Destination Start Address Register */
+#define MDMA_D2_CONFIG 0xFFC01F08 /* MemDMA1 Stream 0 Destination Configuration Register */
+#define MDMA_D2_X_COUNT 0xFFC01F10 /* MemDMA1 Stream 0 Destination X Count Register */
+#define MDMA_D2_X_MODIFY 0xFFC01F14 /* MemDMA1 Stream 0 Destination X Modify Register */
+#define MDMA_D2_Y_COUNT 0xFFC01F18 /* MemDMA1 Stream 0 Destination Y Count Register */
+#define MDMA_D2_Y_MODIFY 0xFFC01F1C /* MemDMA1 Stream 0 Destination Y Modify Register */
+#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20 /* MemDMA1 Stream 0 Destination Current Descriptor Pointer Register */
+#define MDMA_D2_CURR_ADDR 0xFFC01F24 /* MemDMA1 Stream 0 Destination Current Address Register */
+#define MDMA_D2_IRQ_STATUS 0xFFC01F28 /* MemDMA1 Stream 0 Destination Interrupt/Status Register */
+#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C /* MemDMA1 Stream 0 Destination Peripheral Map Register */
+#define MDMA_D2_CURR_X_COUNT 0xFFC01F30 /* MemDMA1 Stream 0 Destination Current X Count Register */
+#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38 /* MemDMA1 Stream 0 Destination Current Y Count Register */
+
+#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40 /* MemDMA1 Stream 0 Source Next Descriptor Pointer Register */
+#define MDMA_S2_START_ADDR 0xFFC01F44 /* MemDMA1 Stream 0 Source Start Address Register */
+#define MDMA_S2_CONFIG 0xFFC01F48 /* MemDMA1 Stream 0 Source Configuration Register */
+#define MDMA_S2_X_COUNT 0xFFC01F50 /* MemDMA1 Stream 0 Source X Count Register */
+#define MDMA_S2_X_MODIFY 0xFFC01F54 /* MemDMA1 Stream 0 Source X Modify Register */
+#define MDMA_S2_Y_COUNT 0xFFC01F58 /* MemDMA1 Stream 0 Source Y Count Register */
+#define MDMA_S2_Y_MODIFY 0xFFC01F5C /* MemDMA1 Stream 0 Source Y Modify Register */
+#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60 /* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
+#define MDMA_S2_CURR_ADDR 0xFFC01F64 /* MemDMA1 Stream 0 Source Current Address Register */
+#define MDMA_S2_IRQ_STATUS 0xFFC01F68 /* MemDMA1 Stream 0 Source Interrupt/Status Register */
+#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C /* MemDMA1 Stream 0 Source Peripheral Map Register */
+#define MDMA_S2_CURR_X_COUNT 0xFFC01F70 /* MemDMA1 Stream 0 Source Current X Count Register */
+#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78 /* MemDMA1 Stream 0 Source Current Y Count Register */
+
+#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80 /* MemDMA1 Stream 1 Destination Next Descriptor Pointer Register */
+#define MDMA_D3_START_ADDR 0xFFC01F84 /* MemDMA1 Stream 1 Destination Start Address Register */
+#define MDMA_D3_CONFIG 0xFFC01F88 /* MemDMA1 Stream 1 Destination Configuration Register */
+#define MDMA_D3_X_COUNT 0xFFC01F90 /* MemDMA1 Stream 1 Destination X Count Register */
+#define MDMA_D3_X_MODIFY 0xFFC01F94 /* MemDMA1 Stream 1 Destination X Modify Register */
+#define MDMA_D3_Y_COUNT 0xFFC01F98 /* MemDMA1 Stream 1 Destination Y Count Register */
+#define MDMA_D3_Y_MODIFY 0xFFC01F9C /* MemDMA1 Stream 1 Destination Y Modify Register */
+#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0 /* MemDMA1 Stream 1 Destination Current Descriptor Pointer Register */
+#define MDMA_D3_CURR_ADDR 0xFFC01FA4 /* MemDMA1 Stream 1 Destination Current Address Register */
+#define MDMA_D3_IRQ_STATUS 0xFFC01FA8 /* MemDMA1 Stream 1 Destination Interrupt/Status Register */
+#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC /* MemDMA1 Stream 1 Destination Peripheral Map Register */
+#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0 /* MemDMA1 Stream 1 Destination Current X Count Register */
+#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8 /* MemDMA1 Stream 1 Destination Current Y Count Register */
+
+#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0 /* MemDMA1 Stream 1 Source Next Descriptor Pointer Register */
+#define MDMA_S3_START_ADDR 0xFFC01FC4 /* MemDMA1 Stream 1 Source Start Address Register */
+#define MDMA_S3_CONFIG 0xFFC01FC8 /* MemDMA1 Stream 1 Source Configuration Register */
+#define MDMA_S3_X_COUNT 0xFFC01FD0 /* MemDMA1 Stream 1 Source X Count Register */
+#define MDMA_S3_X_MODIFY 0xFFC01FD4 /* MemDMA1 Stream 1 Source X Modify Register */
+#define MDMA_S3_Y_COUNT 0xFFC01FD8 /* MemDMA1 Stream 1 Source Y Count Register */
+#define MDMA_S3_Y_MODIFY 0xFFC01FDC /* MemDMA1 Stream 1 Source Y Modify Register */
+#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0 /* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
+#define MDMA_S3_CURR_ADDR 0xFFC01FE4 /* MemDMA1 Stream 1 Source Current Address Register */
+#define MDMA_S3_IRQ_STATUS 0xFFC01FE8 /* MemDMA1 Stream 1 Source Interrupt/Status Register */
+#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC /* MemDMA1 Stream 1 Source Peripheral Map Register */
+#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0 /* MemDMA1 Stream 1 Source Current X Count Register */
+#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8 /* MemDMA1 Stream 1 Source Current Y Count Register */
+
+
+/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
+#define UART1_THR 0xFFC02000 /* Transmit Holding Register */
+#define UART1_RBR 0xFFC02000 /* Receive Buffer Register */
+#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
+#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
+#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
+#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
+#define UART1_LCR 0xFFC0200C /* Line Control Register */
+#define UART1_MCR 0xFFC02010 /* Modem Control Register */
+#define UART1_LSR 0xFFC02014 /* Line Status Register */
+#define UART1_SCR 0xFFC0201C /* Scratch Register */
+#define UART1_GCTL 0xFFC02024 /* Global Control Register */
+
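+/*
+ * Usage sketch: THR/RBR/DLL (and IER/DLH) share addresses; which register
+ * a read or write hits is selected by the 16550-style DLAB bit (assumed
+ * here to be bit 7) in UART1_LCR, as in the divisor programming below.
+ */
+static inline void uart1_set_divisor_sketch(u16 divisor)
+{
+	u16 lcr = bfin_read16(UART1_LCR);
+
+	bfin_write16(UART1_LCR, lcr | 0x80);	/* DLAB = 1: DLL/DLH mapped in */
+	bfin_write16(UART1_DLL, divisor & 0xFF);
+	bfin_write16(UART1_DLH, divisor >> 8);
+	bfin_write16(UART1_LCR, lcr & ~0x80);	/* DLAB = 0: back to THR/RBR */
+}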
+
+/* UART2 Controller (0xFFC02100 - 0xFFC021FF) */
+#define UART2_THR 0xFFC02100 /* Transmit Holding Register */
+#define UART2_RBR 0xFFC02100 /* Receive Buffer Register */
+#define UART2_DLL 0xFFC02100 /* Divisor Latch (Low-Byte) */
+#define UART2_IER 0xFFC02104 /* Interrupt Enable Register */
+#define UART2_DLH 0xFFC02104 /* Divisor Latch (High-Byte) */
+#define UART2_IIR 0xFFC02108 /* Interrupt Identification Register */
+#define UART2_LCR 0xFFC0210C /* Line Control Register */
+#define UART2_MCR 0xFFC02110 /* Modem Control Register */
+#define UART2_LSR 0xFFC02114 /* Line Status Register */
+#define UART2_SCR 0xFFC0211C /* Scratch Register */
+#define UART2_GCTL 0xFFC02124 /* Global Control Register */
+
+
+/* Two-Wire Interface 1 (0xFFC02200 - 0xFFC022FF) */
+#define TWI1_CLKDIV 0xFFC02200 /* Serial Clock Divider Register */
+#define TWI1_CONTROL 0xFFC02204 /* TWI1 Control Register (enable and internal time reference prescaler) */
+#define TWI1_SLAVE_CTL 0xFFC02208 /* Slave Mode Control Register */
+#define TWI1_SLAVE_STAT 0xFFC0220C /* Slave Mode Status Register */
+#define TWI1_SLAVE_ADDR 0xFFC02210 /* Slave Mode Address Register */
+#define TWI1_MASTER_CTL 0xFFC02214 /* Master Mode Control Register */
+#define TWI1_MASTER_STAT 0xFFC02218 /* Master Mode Status Register */
+#define TWI1_MASTER_ADDR 0xFFC0221C /* Master Mode Address Register */
+#define TWI1_INT_STAT 0xFFC02220 /* TWI1 Interrupt Status Register */
+#define TWI1_INT_MASK 0xFFC02224 /* TWI1 Interrupt Mask Register */
+#define TWI1_FIFO_CTL 0xFFC02228 /* FIFO Control Register */
+#define TWI1_FIFO_STAT 0xFFC0222C /* FIFO Status Register */
+#define TWI1_XMT_DATA8 0xFFC02280 /* FIFO Transmit Data Single Byte Register */
+#define TWI1_XMT_DATA16 0xFFC02284 /* FIFO Transmit Data Double Byte Register */
+#define TWI1_RCV_DATA8 0xFFC02288 /* FIFO Receive Data Single Byte Register */
+#define TWI1_RCV_DATA16 0xFFC0228C /* FIFO Receive Data Double Byte Register */
+#define TWI1_REGBASE TWI1_CLKDIV
+
+
+/* the following are for backwards compatibility */
+#define TWI1_PRESCALE TWI1_CONTROL
+#define TWI1_INT_SRC TWI1_INT_STAT
+#define TWI1_INT_ENABLE TWI1_INT_MASK
+
+
+/* SPI1 Controller (0xFFC02300 - 0xFFC023FF) */
+#define SPI1_CTL 0xFFC02300 /* SPI1 Control Register */
+#define SPI1_FLG 0xFFC02304 /* SPI1 Flag Register */
+#define SPI1_STAT 0xFFC02308 /* SPI1 Status Register */
+#define SPI1_TDBR 0xFFC0230C /* SPI1 Transmit Data Buffer Register */
+#define SPI1_RDBR 0xFFC02310 /* SPI1 Receive Data Buffer Register */
+#define SPI1_BAUD 0xFFC02314 /* SPI1 Baud Rate Register */
+#define SPI1_SHADOW 0xFFC02318 /* SPI1_RDBR Shadow Register */
+#define SPI1_REGBASE SPI1_CTL
+
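+/*
+ * Usage sketch: SPI1_SHADOW mirrors SPI1_RDBR, but reading it has no side
+ * effects (no flag clearing, no new transfer initiated), which is what
+ * makes it safe for debuggers and polling loops.
+ */
+static inline u16 spi1_peek_rx_sketch(void)
+{
+	return bfin_read16(SPI1_SHADOW);	/* side-effect-free peek at RDBR */
+}
+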
+/* SPI2 Controller (0xFFC02400 - 0xFFC024FF) */
+#define SPI2_CTL 0xFFC02400 /* SPI2 Control Register */
+#define SPI2_FLG 0xFFC02404 /* SPI2 Flag Register */
+#define SPI2_STAT 0xFFC02408 /* SPI2 Status Register */
+#define SPI2_TDBR 0xFFC0240C /* SPI2 Transmit Data Buffer Register */
+#define SPI2_RDBR 0xFFC02410 /* SPI2 Receive Data Buffer Register */
+#define SPI2_BAUD 0xFFC02414 /* SPI2 Baud Rate Register */
+#define SPI2_SHADOW 0xFFC02418 /* SPI2_RDBR Shadow Register */
+#define SPI2_REGBASE SPI2_CTL
+
+/* SPORT2 Controller (0xFFC02500 - 0xFFC025FF) */
+#define SPORT2_TCR1 0xFFC02500 /* SPORT2 Transmit Configuration 1 Register */
+#define SPORT2_TCR2 0xFFC02504 /* SPORT2 Transmit Configuration 2 Register */
+#define SPORT2_TCLKDIV 0xFFC02508 /* SPORT2 Transmit Clock Divider */
+#define SPORT2_TFSDIV 0xFFC0250C /* SPORT2 Transmit Frame Sync Divider */
+#define SPORT2_TX 0xFFC02510 /* SPORT2 TX Data Register */
+#define SPORT2_RX 0xFFC02518 /* SPORT2 RX Data Register */
+#define SPORT2_RCR1 0xFFC02520 /* SPORT2 Receive Configuration 1 Register */
+#define SPORT2_RCR2 0xFFC02524 /* SPORT2 Receive Configuration 2 Register */
+#define SPORT2_RCLKDIV 0xFFC02528 /* SPORT2 Receive Clock Divider */
+#define SPORT2_RFSDIV 0xFFC0252C /* SPORT2 Receive Frame Sync Divider */
+#define SPORT2_STAT 0xFFC02530 /* SPORT2 Status Register */
+#define SPORT2_CHNL 0xFFC02534 /* SPORT2 Current Channel Register */
+#define SPORT2_MCMC1 0xFFC02538 /* SPORT2 Multi-Channel Configuration Register 1 */
+#define SPORT2_MCMC2 0xFFC0253C /* SPORT2 Multi-Channel Configuration Register 2 */
+#define SPORT2_MTCS0 0xFFC02540 /* SPORT2 Multi-Channel Transmit Select Register 0 */
+#define SPORT2_MTCS1 0xFFC02544 /* SPORT2 Multi-Channel Transmit Select Register 1 */
+#define SPORT2_MTCS2 0xFFC02548 /* SPORT2 Multi-Channel Transmit Select Register 2 */
+#define SPORT2_MTCS3 0xFFC0254C /* SPORT2 Multi-Channel Transmit Select Register 3 */
+#define SPORT2_MRCS0 0xFFC02550 /* SPORT2 Multi-Channel Receive Select Register 0 */
+#define SPORT2_MRCS1 0xFFC02554 /* SPORT2 Multi-Channel Receive Select Register 1 */
+#define SPORT2_MRCS2 0xFFC02558 /* SPORT2 Multi-Channel Receive Select Register 2 */
+#define SPORT2_MRCS3 0xFFC0255C /* SPORT2 Multi-Channel Receive Select Register 3 */
+
+
+/* SPORT3 Controller (0xFFC02600 - 0xFFC026FF) */
+#define SPORT3_TCR1 0xFFC02600 /* SPORT3 Transmit Configuration 1 Register */
+#define SPORT3_TCR2 0xFFC02604 /* SPORT3 Transmit Configuration 2 Register */
+#define SPORT3_TCLKDIV 0xFFC02608 /* SPORT3 Transmit Clock Divider */
+#define SPORT3_TFSDIV 0xFFC0260C /* SPORT3 Transmit Frame Sync Divider */
+#define SPORT3_TX 0xFFC02610 /* SPORT3 TX Data Register */
+#define SPORT3_RX 0xFFC02618 /* SPORT3 RX Data Register */
+#define SPORT3_RCR1 0xFFC02620 /* SPORT3 Receive Configuration 1 Register */
+#define SPORT3_RCR2 0xFFC02624 /* SPORT3 Receive Configuration 2 Register */
+#define SPORT3_RCLKDIV 0xFFC02628 /* SPORT3 Receive Clock Divider */
+#define SPORT3_RFSDIV 0xFFC0262C /* SPORT3 Receive Frame Sync Divider */
+#define SPORT3_STAT 0xFFC02630 /* SPORT3 Status Register */
+#define SPORT3_CHNL 0xFFC02634 /* SPORT3 Current Channel Register */
+#define SPORT3_MCMC1 0xFFC02638 /* SPORT3 Multi-Channel Configuration Register 1 */
+#define SPORT3_MCMC2 0xFFC0263C /* SPORT3 Multi-Channel Configuration Register 2 */
+#define SPORT3_MTCS0 0xFFC02640 /* SPORT3 Multi-Channel Transmit Select Register 0 */
+#define SPORT3_MTCS1 0xFFC02644 /* SPORT3 Multi-Channel Transmit Select Register 1 */
+#define SPORT3_MTCS2 0xFFC02648 /* SPORT3 Multi-Channel Transmit Select Register 2 */
+#define SPORT3_MTCS3 0xFFC0264C /* SPORT3 Multi-Channel Transmit Select Register 3 */
+#define SPORT3_MRCS0 0xFFC02650 /* SPORT3 Multi-Channel Receive Select Register 0 */
+#define SPORT3_MRCS1 0xFFC02654 /* SPORT3 Multi-Channel Receive Select Register 1 */
+#define SPORT3_MRCS2 0xFFC02658 /* SPORT3 Multi-Channel Receive Select Register 2 */
+#define SPORT3_MRCS3 0xFFC0265C /* SPORT3 Multi-Channel Receive Select Register 3 */
+
+
+/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */
+/* For Mailboxes 0-15 */
+#define CAN_MC1 0xFFC02A00 /* Mailbox config reg 1 */
+#define CAN_MD1 0xFFC02A04 /* Mailbox direction reg 1 */
+#define CAN_TRS1 0xFFC02A08 /* Transmit Request Set reg 1 */
+#define CAN_TRR1 0xFFC02A0C /* Transmit Request Reset reg 1 */
+#define CAN_TA1 0xFFC02A10 /* Transmit Acknowledge reg 1 */
+#define CAN_AA1 0xFFC02A14 /* Transmit Abort Acknowledge reg 1 */
+#define CAN_RMP1 0xFFC02A18 /* Receive Message Pending reg 1 */
+#define CAN_RML1 0xFFC02A1C /* Receive Message Lost reg 1 */
+#define CAN_MBTIF1 0xFFC02A20 /* Mailbox Transmit Interrupt Flag reg 1 */
+#define CAN_MBRIF1 0xFFC02A24 /* Mailbox Receive Interrupt Flag reg 1 */
+#define CAN_MBIM1 0xFFC02A28 /* Mailbox Interrupt Mask reg 1 */
+#define CAN_RFH1 0xFFC02A2C /* Remote Frame Handling reg 1 */
+#define CAN_OPSS1 0xFFC02A30 /* Overwrite Protection Single Shot Transmission reg 1 */
+
+/* For Mailboxes 16-31 */
+#define CAN_MC2 0xFFC02A40 /* Mailbox config reg 2 */
+#define CAN_MD2 0xFFC02A44 /* Mailbox direction reg 2 */
+#define CAN_TRS2 0xFFC02A48 /* Transmit Request Set reg 2 */
+#define CAN_TRR2 0xFFC02A4C /* Transmit Request Reset reg 2 */
+#define CAN_TA2 0xFFC02A50 /* Transmit Acknowledge reg 2 */
+#define CAN_AA2 0xFFC02A54 /* Transmit Abort Acknowledge reg 2 */
+#define CAN_RMP2 0xFFC02A58 /* Receive Message Pending reg 2 */
+#define CAN_RML2 0xFFC02A5C /* Receive Message Lost reg 2 */
+#define CAN_MBTIF2 0xFFC02A60 /* Mailbox Transmit Interrupt Flag reg 2 */
+#define CAN_MBRIF2 0xFFC02A64 /* Mailbox Receive Interrupt Flag reg 2 */
+#define CAN_MBIM2 0xFFC02A68 /* Mailbox Interrupt Mask reg 2 */
+#define CAN_RFH2 0xFFC02A6C /* Remote Frame Handling reg 2 */
+#define CAN_OPSS2 0xFFC02A70 /* Overwrite Protection Single Shot Transmission reg 2 */
+
+#define CAN_CLOCK 0xFFC02A80 /* Bit Timing Configuration register 0 */
+#define CAN_TIMING 0xFFC02A84 /* Bit Timing Configuration register 1 */
+
+#define CAN_DEBUG 0xFFC02A88 /* Debug Register */
+/* the following is for backwards compatibility */
+#define CAN_CNF CAN_DEBUG
+
+#define CAN_STATUS 0xFFC02A8C /* Global Status Register */
+#define CAN_CEC 0xFFC02A90 /* Error Counter Register */
+#define CAN_GIS 0xFFC02A94 /* Global Interrupt Status Register */
+#define CAN_GIM 0xFFC02A98 /* Global Interrupt Mask Register */
+#define CAN_GIF 0xFFC02A9C /* Global Interrupt Flag Register */
+#define CAN_CONTROL 0xFFC02AA0 /* Master Control Register */
+#define CAN_INTR 0xFFC02AA4 /* Interrupt Pending Register */
+#define CAN_MBTD 0xFFC02AAC /* Mailbox Temporary Disable Feature */
+#define CAN_EWR 0xFFC02AB0 /* Programmable Warning Level */
+#define CAN_ESR 0xFFC02AB4 /* Error Status Register */
+#define CAN_UCCNT 0xFFC02AC4 /* Universal Counter */
+#define CAN_UCRC 0xFFC02AC8 /* Universal Counter Reload/Capture Register */
+#define CAN_UCCNF 0xFFC02ACC /* Universal Counter Configuration Register */
+
+/* Mailbox Acceptance Masks */
+#define CAN_AM00L 0xFFC02B00 /* Mailbox 0 Low Acceptance Mask */
+#define CAN_AM00H 0xFFC02B04 /* Mailbox 0 High Acceptance Mask */
+#define CAN_AM01L 0xFFC02B08 /* Mailbox 1 Low Acceptance Mask */
+#define CAN_AM01H 0xFFC02B0C /* Mailbox 1 High Acceptance Mask */
+#define CAN_AM02L 0xFFC02B10 /* Mailbox 2 Low Acceptance Mask */
+#define CAN_AM02H 0xFFC02B14 /* Mailbox 2 High Acceptance Mask */
+#define CAN_AM03L 0xFFC02B18 /* Mailbox 3 Low Acceptance Mask */
+#define CAN_AM03H 0xFFC02B1C /* Mailbox 3 High Acceptance Mask */
+#define CAN_AM04L 0xFFC02B20 /* Mailbox 4 Low Acceptance Mask */
+#define CAN_AM04H 0xFFC02B24 /* Mailbox 4 High Acceptance Mask */
+#define CAN_AM05L 0xFFC02B28 /* Mailbox 5 Low Acceptance Mask */
+#define CAN_AM05H 0xFFC02B2C /* Mailbox 5 High Acceptance Mask */
+#define CAN_AM06L 0xFFC02B30 /* Mailbox 6 Low Acceptance Mask */
+#define CAN_AM06H 0xFFC02B34 /* Mailbox 6 High Acceptance Mask */
+#define CAN_AM07L 0xFFC02B38 /* Mailbox 7 Low Acceptance Mask */
+#define CAN_AM07H 0xFFC02B3C /* Mailbox 7 High Acceptance Mask */
+#define CAN_AM08L 0xFFC02B40 /* Mailbox 8 Low Acceptance Mask */
+#define CAN_AM08H 0xFFC02B44 /* Mailbox 8 High Acceptance Mask */
+#define CAN_AM09L 0xFFC02B48 /* Mailbox 9 Low Acceptance Mask */
+#define CAN_AM09H 0xFFC02B4C /* Mailbox 9 High Acceptance Mask */
+#define CAN_AM10L 0xFFC02B50 /* Mailbox 10 Low Acceptance Mask */
+#define CAN_AM10H 0xFFC02B54 /* Mailbox 10 High Acceptance Mask */
+#define CAN_AM11L 0xFFC02B58 /* Mailbox 11 Low Acceptance Mask */
+#define CAN_AM11H 0xFFC02B5C /* Mailbox 11 High Acceptance Mask */
+#define CAN_AM12L 0xFFC02B60 /* Mailbox 12 Low Acceptance Mask */
+#define CAN_AM12H 0xFFC02B64 /* Mailbox 12 High Acceptance Mask */
+#define CAN_AM13L 0xFFC02B68 /* Mailbox 13 Low Acceptance Mask */
+#define CAN_AM13H 0xFFC02B6C /* Mailbox 13 High Acceptance Mask */
+#define CAN_AM14L 0xFFC02B70 /* Mailbox 14 Low Acceptance Mask */
+#define CAN_AM14H 0xFFC02B74 /* Mailbox 14 High Acceptance Mask */
+#define CAN_AM15L 0xFFC02B78 /* Mailbox 15 Low Acceptance Mask */
+#define CAN_AM15H 0xFFC02B7C /* Mailbox 15 High Acceptance Mask */
+
+#define CAN_AM16L 0xFFC02B80 /* Mailbox 16 Low Acceptance Mask */
+#define CAN_AM16H 0xFFC02B84 /* Mailbox 16 High Acceptance Mask */
+#define CAN_AM17L 0xFFC02B88 /* Mailbox 17 Low Acceptance Mask */
+#define CAN_AM17H 0xFFC02B8C /* Mailbox 17 High Acceptance Mask */
+#define CAN_AM18L 0xFFC02B90 /* Mailbox 18 Low Acceptance Mask */
+#define CAN_AM18H 0xFFC02B94 /* Mailbox 18 High Acceptance Mask */
+#define CAN_AM19L 0xFFC02B98 /* Mailbox 19 Low Acceptance Mask */
+#define CAN_AM19H 0xFFC02B9C /* Mailbox 19 High Acceptance Mask */
+#define CAN_AM20L 0xFFC02BA0 /* Mailbox 20 Low Acceptance Mask */
+#define CAN_AM20H 0xFFC02BA4 /* Mailbox 20 High Acceptance Mask */
+#define CAN_AM21L 0xFFC02BA8 /* Mailbox 21 Low Acceptance Mask */
+#define CAN_AM21H 0xFFC02BAC /* Mailbox 21 High Acceptance Mask */
+#define CAN_AM22L 0xFFC02BB0 /* Mailbox 22 Low Acceptance Mask */
+#define CAN_AM22H 0xFFC02BB4 /* Mailbox 22 High Acceptance Mask */
+#define CAN_AM23L 0xFFC02BB8 /* Mailbox 23 Low Acceptance Mask */
+#define CAN_AM23H 0xFFC02BBC /* Mailbox 23 High Acceptance Mask */
+#define CAN_AM24L 0xFFC02BC0 /* Mailbox 24 Low Acceptance Mask */
+#define CAN_AM24H 0xFFC02BC4 /* Mailbox 24 High Acceptance Mask */
+#define CAN_AM25L 0xFFC02BC8 /* Mailbox 25 Low Acceptance Mask */
+#define CAN_AM25H 0xFFC02BCC /* Mailbox 25 High Acceptance Mask */
+#define CAN_AM26L 0xFFC02BD0 /* Mailbox 26 Low Acceptance Mask */
+#define CAN_AM26H 0xFFC02BD4 /* Mailbox 26 High Acceptance Mask */
+#define CAN_AM27L 0xFFC02BD8 /* Mailbox 27 Low Acceptance Mask */
+#define CAN_AM27H 0xFFC02BDC /* Mailbox 27 High Acceptance Mask */
+#define CAN_AM28L 0xFFC02BE0 /* Mailbox 28 Low Acceptance Mask */
+#define CAN_AM28H 0xFFC02BE4 /* Mailbox 28 High Acceptance Mask */
+#define CAN_AM29L 0xFFC02BE8 /* Mailbox 29 Low Acceptance Mask */
+#define CAN_AM29H 0xFFC02BEC /* Mailbox 29 High Acceptance Mask */
+#define CAN_AM30L 0xFFC02BF0 /* Mailbox 30 Low Acceptance Mask */
+#define CAN_AM30H 0xFFC02BF4 /* Mailbox 30 High Acceptance Mask */
+#define CAN_AM31L 0xFFC02BF8 /* Mailbox 31 Low Acceptance Mask */
+#define CAN_AM31H 0xFFC02BFC /* Mailbox 31 High Acceptance Mask */
+
+/* CAN Acceptance Mask Macros */
+#define CAN_AM_L(x) (CAN_AM00L+((x)*0x8))
+#define CAN_AM_H(x) (CAN_AM00H+((x)*0x8))
+
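+/*
+ * The macros expand a mailbox number into the word pair above; each
+ * mailbox owns an 8-byte low/high mask pair, so e.g.
+ * CAN_AM_L(5) == CAN_AM05L == 0xFFC02B28. A sketch of their use:
+ */
+static inline void can_set_mask_sketch(int mbox, u16 lo, u16 hi)
+{
+	bfin_write16(CAN_AM_L(mbox), lo);
+	bfin_write16(CAN_AM_H(mbox), hi);
+}
+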
+/* Mailbox Registers */
+#define CAN_MB00_DATA0 0xFFC02C00 /* Mailbox 0 Data Word 0 [15:0] Register */
+#define CAN_MB00_DATA1 0xFFC02C04 /* Mailbox 0 Data Word 1 [31:16] Register */
+#define CAN_MB00_DATA2 0xFFC02C08 /* Mailbox 0 Data Word 2 [47:32] Register */
+#define CAN_MB00_DATA3 0xFFC02C0C /* Mailbox 0 Data Word 3 [63:48] Register */
+#define CAN_MB00_LENGTH 0xFFC02C10 /* Mailbox 0 Data Length Code Register */
+#define CAN_MB00_TIMESTAMP 0xFFC02C14 /* Mailbox 0 Time Stamp Value Register */
+#define CAN_MB00_ID0 0xFFC02C18 /* Mailbox 0 Identifier Low Register */
+#define CAN_MB00_ID1 0xFFC02C1C /* Mailbox 0 Identifier High Register */
+
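+/*
+ * Layout note with a sketch: each mailbox occupies 0x20 bytes in the
+ * order DATA0..DATA3, LENGTH, TIMESTAMP, ID0, ID1, so a hypothetical
+ * helper (not part of the header) can derive any mailbox register from
+ * mailbox 0's base:
+ */
+#define CAN_MB_REG_SKETCH(mbox, off) (CAN_MB00_DATA0 + (mbox) * 0x20 + (off))
+/* e.g. CAN_MB_REG_SKETCH(1, 0x10) == CAN_MB01_LENGTH == 0xFFC02C30 */
+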
+#define CAN_MB01_DATA0 0xFFC02C20 /* Mailbox 1 Data Word 0 [15:0] Register */
+#define CAN_MB01_DATA1 0xFFC02C24 /* Mailbox 1 Data Word 1 [31:16] Register */
+#define CAN_MB01_DATA2 0xFFC02C28 /* Mailbox 1 Data Word 2 [47:32] Register */
+#define CAN_MB01_DATA3 0xFFC02C2C /* Mailbox 1 Data Word 3 [63:48] Register */
+#define CAN_MB01_LENGTH 0xFFC02C30 /* Mailbox 1 Data Length Code Register */
+#define CAN_MB01_TIMESTAMP 0xFFC02C34 /* Mailbox 1 Time Stamp Value Register */
+#define CAN_MB01_ID0 0xFFC02C38 /* Mailbox 1 Identifier Low Register */
+#define CAN_MB01_ID1 0xFFC02C3C /* Mailbox 1 Identifier High Register */
+
+#define CAN_MB02_DATA0 0xFFC02C40 /* Mailbox 2 Data Word 0 [15:0] Register */
+#define CAN_MB02_DATA1 0xFFC02C44 /* Mailbox 2 Data Word 1 [31:16] Register */
+#define CAN_MB02_DATA2 0xFFC02C48 /* Mailbox 2 Data Word 2 [47:32] Register */
+#define CAN_MB02_DATA3 0xFFC02C4C /* Mailbox 2 Data Word 3 [63:48] Register */
+#define CAN_MB02_LENGTH 0xFFC02C50 /* Mailbox 2 Data Length Code Register */
+#define CAN_MB02_TIMESTAMP 0xFFC02C54 /* Mailbox 2 Time Stamp Value Register */
+#define CAN_MB02_ID0 0xFFC02C58 /* Mailbox 2 Identifier Low Register */
+#define CAN_MB02_ID1 0xFFC02C5C /* Mailbox 2 Identifier High Register */
+
+#define CAN_MB03_DATA0 0xFFC02C60 /* Mailbox 3 Data Word 0 [15:0] Register */
+#define CAN_MB03_DATA1 0xFFC02C64 /* Mailbox 3 Data Word 1 [31:16] Register */
+#define CAN_MB03_DATA2 0xFFC02C68 /* Mailbox 3 Data Word 2 [47:32] Register */
+#define CAN_MB03_DATA3 0xFFC02C6C /* Mailbox 3 Data Word 3 [63:48] Register */
+#define CAN_MB03_LENGTH 0xFFC02C70 /* Mailbox 3 Data Length Code Register */
+#define CAN_MB03_TIMESTAMP 0xFFC02C74 /* Mailbox 3 Time Stamp Value Register */
+#define CAN_MB03_ID0 0xFFC02C78 /* Mailbox 3 Identifier Low Register */
+#define CAN_MB03_ID1 0xFFC02C7C /* Mailbox 3 Identifier High Register */
+
+#define CAN_MB04_DATA0 0xFFC02C80 /* Mailbox 4 Data Word 0 [15:0] Register */
+#define CAN_MB04_DATA1 0xFFC02C84 /* Mailbox 4 Data Word 1 [31:16] Register */
+#define CAN_MB04_DATA2 0xFFC02C88 /* Mailbox 4 Data Word 2 [47:32] Register */
+#define CAN_MB04_DATA3 0xFFC02C8C /* Mailbox 4 Data Word 3 [63:48] Register */
+#define CAN_MB04_LENGTH 0xFFC02C90 /* Mailbox 4 Data Length Code Register */
+#define CAN_MB04_TIMESTAMP 0xFFC02C94 /* Mailbox 4 Time Stamp Value Register */
+#define CAN_MB04_ID0 0xFFC02C98 /* Mailbox 4 Identifier Low Register */
+#define CAN_MB04_ID1 0xFFC02C9C /* Mailbox 4 Identifier High Register */
+
+#define CAN_MB05_DATA0 0xFFC02CA0 /* Mailbox 5 Data Word 0 [15:0] Register */
+#define CAN_MB05_DATA1 0xFFC02CA4 /* Mailbox 5 Data Word 1 [31:16] Register */
+#define CAN_MB05_DATA2 0xFFC02CA8 /* Mailbox 5 Data Word 2 [47:32] Register */
+#define CAN_MB05_DATA3 0xFFC02CAC /* Mailbox 5 Data Word 3 [63:48] Register */
+#define CAN_MB05_LENGTH 0xFFC02CB0 /* Mailbox 5 Data Length Code Register */
+#define CAN_MB05_TIMESTAMP 0xFFC02CB4 /* Mailbox 5 Time Stamp Value Register */
+#define CAN_MB05_ID0 0xFFC02CB8 /* Mailbox 5 Identifier Low Register */
+#define CAN_MB05_ID1 0xFFC02CBC /* Mailbox 5 Identifier High Register */
+
+#define CAN_MB06_DATA0 0xFFC02CC0 /* Mailbox 6 Data Word 0 [15:0] Register */
+#define CAN_MB06_DATA1 0xFFC02CC4 /* Mailbox 6 Data Word 1 [31:16] Register */
+#define CAN_MB06_DATA2 0xFFC02CC8 /* Mailbox 6 Data Word 2 [47:32] Register */
+#define CAN_MB06_DATA3 0xFFC02CCC /* Mailbox 6 Data Word 3 [63:48] Register */
+#define CAN_MB06_LENGTH 0xFFC02CD0 /* Mailbox 6 Data Length Code Register */
+#define CAN_MB06_TIMESTAMP 0xFFC02CD4 /* Mailbox 6 Time Stamp Value Register */
+#define CAN_MB06_ID0 0xFFC02CD8 /* Mailbox 6 Identifier Low Register */
+#define CAN_MB06_ID1 0xFFC02CDC /* Mailbox 6 Identifier High Register */
+
+#define CAN_MB07_DATA0 0xFFC02CE0 /* Mailbox 7 Data Word 0 [15:0] Register */
+#define CAN_MB07_DATA1 0xFFC02CE4 /* Mailbox 7 Data Word 1 [31:16] Register */
+#define CAN_MB07_DATA2 0xFFC02CE8 /* Mailbox 7 Data Word 2 [47:32] Register */
+#define CAN_MB07_DATA3 0xFFC02CEC /* Mailbox 7 Data Word 3 [63:48] Register */
+#define CAN_MB07_LENGTH 0xFFC02CF0 /* Mailbox 7 Data Length Code Register */
+#define CAN_MB07_TIMESTAMP 0xFFC02CF4 /* Mailbox 7 Time Stamp Value Register */
+#define CAN_MB07_ID0 0xFFC02CF8 /* Mailbox 7 Identifier Low Register */
+#define CAN_MB07_ID1 0xFFC02CFC /* Mailbox 7 Identifier High Register */
+
+#define CAN_MB08_DATA0 0xFFC02D00 /* Mailbox 8 Data Word 0 [15:0] Register */
+#define CAN_MB08_DATA1 0xFFC02D04 /* Mailbox 8 Data Word 1 [31:16] Register */
+#define CAN_MB08_DATA2 0xFFC02D08 /* Mailbox 8 Data Word 2 [47:32] Register */
+#define CAN_MB08_DATA3 0xFFC02D0C /* Mailbox 8 Data Word 3 [63:48] Register */
+#define CAN_MB08_LENGTH 0xFFC02D10 /* Mailbox 8 Data Length Code Register */
+#define CAN_MB08_TIMESTAMP 0xFFC02D14 /* Mailbox 8 Time Stamp Value Register */
+#define CAN_MB08_ID0 0xFFC02D18 /* Mailbox 8 Identifier Low Register */
+#define CAN_MB08_ID1 0xFFC02D1C /* Mailbox 8 Identifier High Register */
+
+#define CAN_MB09_DATA0 0xFFC02D20 /* Mailbox 9 Data Word 0 [15:0] Register */
+#define CAN_MB09_DATA1 0xFFC02D24 /* Mailbox 9 Data Word 1 [31:16] Register */
+#define CAN_MB09_DATA2 0xFFC02D28 /* Mailbox 9 Data Word 2 [47:32] Register */
+#define CAN_MB09_DATA3 0xFFC02D2C /* Mailbox 9 Data Word 3 [63:48] Register */
+#define CAN_MB09_LENGTH 0xFFC02D30 /* Mailbox 9 Data Length Code Register */
+#define CAN_MB09_TIMESTAMP 0xFFC02D34 /* Mailbox 9 Time Stamp Value Register */
+#define CAN_MB09_ID0 0xFFC02D38 /* Mailbox 9 Identifier Low Register */
+#define CAN_MB09_ID1 0xFFC02D3C /* Mailbox 9 Identifier High Register */
+
+#define CAN_MB10_DATA0 0xFFC02D40 /* Mailbox 10 Data Word 0 [15:0] Register */
+#define CAN_MB10_DATA1 0xFFC02D44 /* Mailbox 10 Data Word 1 [31:16] Register */
+#define CAN_MB10_DATA2 0xFFC02D48 /* Mailbox 10 Data Word 2 [47:32] Register */
+#define CAN_MB10_DATA3 0xFFC02D4C /* Mailbox 10 Data Word 3 [63:48] Register */
+#define CAN_MB10_LENGTH 0xFFC02D50 /* Mailbox 10 Data Length Code Register */
+#define CAN_MB10_TIMESTAMP 0xFFC02D54 /* Mailbox 10 Time Stamp Value Register */
+#define CAN_MB10_ID0 0xFFC02D58 /* Mailbox 10 Identifier Low Register */
+#define CAN_MB10_ID1 0xFFC02D5C /* Mailbox 10 Identifier High Register */
+
+#define CAN_MB11_DATA0 0xFFC02D60 /* Mailbox 11 Data Word 0 [15:0] Register */
+#define CAN_MB11_DATA1 0xFFC02D64 /* Mailbox 11 Data Word 1 [31:16] Register */
+#define CAN_MB11_DATA2 0xFFC02D68 /* Mailbox 11 Data Word 2 [47:32] Register */
+#define CAN_MB11_DATA3 0xFFC02D6C /* Mailbox 11 Data Word 3 [63:48] Register */
+#define CAN_MB11_LENGTH 0xFFC02D70 /* Mailbox 11 Data Length Code Register */
+#define CAN_MB11_TIMESTAMP 0xFFC02D74 /* Mailbox 11 Time Stamp Value Register */
+#define CAN_MB11_ID0 0xFFC02D78 /* Mailbox 11 Identifier Low Register */
+#define CAN_MB11_ID1 0xFFC02D7C /* Mailbox 11 Identifier High Register */
+
+#define CAN_MB12_DATA0 0xFFC02D80 /* Mailbox 12 Data Word 0 [15:0] Register */
+#define CAN_MB12_DATA1 0xFFC02D84 /* Mailbox 12 Data Word 1 [31:16] Register */
+#define CAN_MB12_DATA2 0xFFC02D88 /* Mailbox 12 Data Word 2 [47:32] Register */
+#define CAN_MB12_DATA3 0xFFC02D8C /* Mailbox 12 Data Word 3 [63:48] Register */
+#define CAN_MB12_LENGTH 0xFFC02D90 /* Mailbox 12 Data Length Code Register */
+#define CAN_MB12_TIMESTAMP 0xFFC02D94 /* Mailbox 12 Time Stamp Value Register */
+#define CAN_MB12_ID0 0xFFC02D98 /* Mailbox 12 Identifier Low Register */
+#define CAN_MB12_ID1 0xFFC02D9C /* Mailbox 12 Identifier High Register */
+
+#define CAN_MB13_DATA0 0xFFC02DA0 /* Mailbox 13 Data Word 0 [15:0] Register */
+#define CAN_MB13_DATA1 0xFFC02DA4 /* Mailbox 13 Data Word 1 [31:16] Register */
+#define CAN_MB13_DATA2 0xFFC02DA8 /* Mailbox 13 Data Word 2 [47:32] Register */
+#define CAN_MB13_DATA3 0xFFC02DAC /* Mailbox 13 Data Word 3 [63:48] Register */
+#define CAN_MB13_LENGTH 0xFFC02DB0 /* Mailbox 13 Data Length Code Register */
+#define CAN_MB13_TIMESTAMP 0xFFC02DB4 /* Mailbox 13 Time Stamp Value Register */
+#define CAN_MB13_ID0 0xFFC02DB8 /* Mailbox 13 Identifier Low Register */
+#define CAN_MB13_ID1 0xFFC02DBC /* Mailbox 13 Identifier High Register */
+
+#define CAN_MB14_DATA0 0xFFC02DC0 /* Mailbox 14 Data Word 0 [15:0] Register */
+#define CAN_MB14_DATA1 0xFFC02DC4 /* Mailbox 14 Data Word 1 [31:16] Register */
+#define CAN_MB14_DATA2 0xFFC02DC8 /* Mailbox 14 Data Word 2 [47:32] Register */
+#define CAN_MB14_DATA3 0xFFC02DCC /* Mailbox 14 Data Word 3 [63:48] Register */
+#define CAN_MB14_LENGTH 0xFFC02DD0 /* Mailbox 14 Data Length Code Register */
+#define CAN_MB14_TIMESTAMP 0xFFC02DD4 /* Mailbox 14 Time Stamp Value Register */
+#define CAN_MB14_ID0 0xFFC02DD8 /* Mailbox 14 Identifier Low Register */
+#define CAN_MB14_ID1 0xFFC02DDC /* Mailbox 14 Identifier High Register */
+
+#define CAN_MB15_DATA0 0xFFC02DE0 /* Mailbox 15 Data Word 0 [15:0] Register */
+#define CAN_MB15_DATA1 0xFFC02DE4 /* Mailbox 15 Data Word 1 [31:16] Register */
+#define CAN_MB15_DATA2 0xFFC02DE8 /* Mailbox 15 Data Word 2 [47:32] Register */
+#define CAN_MB15_DATA3 0xFFC02DEC /* Mailbox 15 Data Word 3 [63:48] Register */
+#define CAN_MB15_LENGTH 0xFFC02DF0 /* Mailbox 15 Data Length Code Register */
+#define CAN_MB15_TIMESTAMP 0xFFC02DF4 /* Mailbox 15 Time Stamp Value Register */
+#define CAN_MB15_ID0 0xFFC02DF8 /* Mailbox 15 Identifier Low Register */
+#define CAN_MB15_ID1 0xFFC02DFC /* Mailbox 15 Identifier High Register */
+
+#define CAN_MB16_DATA0 0xFFC02E00 /* Mailbox 16 Data Word 0 [15:0] Register */
+#define CAN_MB16_DATA1 0xFFC02E04 /* Mailbox 16 Data Word 1 [31:16] Register */
+#define CAN_MB16_DATA2 0xFFC02E08 /* Mailbox 16 Data Word 2 [47:32] Register */
+#define CAN_MB16_DATA3 0xFFC02E0C /* Mailbox 16 Data Word 3 [63:48] Register */
+#define CAN_MB16_LENGTH 0xFFC02E10 /* Mailbox 16 Data Length Code Register */
+#define CAN_MB16_TIMESTAMP 0xFFC02E14 /* Mailbox 16 Time Stamp Value Register */
+#define CAN_MB16_ID0 0xFFC02E18 /* Mailbox 16 Identifier Low Register */
+#define CAN_MB16_ID1 0xFFC02E1C /* Mailbox 16 Identifier High Register */
+
+#define CAN_MB17_DATA0 0xFFC02E20 /* Mailbox 17 Data Word 0 [15:0] Register */
+#define CAN_MB17_DATA1 0xFFC02E24 /* Mailbox 17 Data Word 1 [31:16] Register */
+#define CAN_MB17_DATA2 0xFFC02E28 /* Mailbox 17 Data Word 2 [47:32] Register */
+#define CAN_MB17_DATA3 0xFFC02E2C /* Mailbox 17 Data Word 3 [63:48] Register */
+#define CAN_MB17_LENGTH 0xFFC02E30 /* Mailbox 17 Data Length Code Register */
+#define CAN_MB17_TIMESTAMP 0xFFC02E34 /* Mailbox 17 Time Stamp Value Register */
+#define CAN_MB17_ID0 0xFFC02E38 /* Mailbox 17 Identifier Low Register */
+#define CAN_MB17_ID1 0xFFC02E3C /* Mailbox 17 Identifier High Register */
+
+#define CAN_MB18_DATA0 0xFFC02E40 /* Mailbox 18 Data Word 0 [15:0] Register */
+#define CAN_MB18_DATA1 0xFFC02E44 /* Mailbox 18 Data Word 1 [31:16] Register */
+#define CAN_MB18_DATA2 0xFFC02E48 /* Mailbox 18 Data Word 2 [47:32] Register */
+#define CAN_MB18_DATA3 0xFFC02E4C /* Mailbox 18 Data Word 3 [63:48] Register */
+#define CAN_MB18_LENGTH 0xFFC02E50 /* Mailbox 18 Data Length Code Register */
+#define CAN_MB18_TIMESTAMP 0xFFC02E54 /* Mailbox 18 Time Stamp Value Register */
+#define CAN_MB18_ID0 0xFFC02E58 /* Mailbox 18 Identifier Low Register */
+#define CAN_MB18_ID1 0xFFC02E5C /* Mailbox 18 Identifier High Register */
+
+#define CAN_MB19_DATA0 0xFFC02E60 /* Mailbox 19 Data Word 0 [15:0] Register */
+#define CAN_MB19_DATA1 0xFFC02E64 /* Mailbox 19 Data Word 1 [31:16] Register */
+#define CAN_MB19_DATA2 0xFFC02E68 /* Mailbox 19 Data Word 2 [47:32] Register */
+#define CAN_MB19_DATA3 0xFFC02E6C /* Mailbox 19 Data Word 3 [63:48] Register */
+#define CAN_MB19_LENGTH 0xFFC02E70 /* Mailbox 19 Data Length Code Register */
+#define CAN_MB19_TIMESTAMP 0xFFC02E74 /* Mailbox 19 Time Stamp Value Register */
+#define CAN_MB19_ID0 0xFFC02E78 /* Mailbox 19 Identifier Low Register */
+#define CAN_MB19_ID1 0xFFC02E7C /* Mailbox 19 Identifier High Register */
+
+#define CAN_MB20_DATA0 0xFFC02E80 /* Mailbox 20 Data Word 0 [15:0] Register */
+#define CAN_MB20_DATA1 0xFFC02E84 /* Mailbox 20 Data Word 1 [31:16] Register */
+#define CAN_MB20_DATA2 0xFFC02E88 /* Mailbox 20 Data Word 2 [47:32] Register */
+#define CAN_MB20_DATA3 0xFFC02E8C /* Mailbox 20 Data Word 3 [63:48] Register */
+#define CAN_MB20_LENGTH 0xFFC02E90 /* Mailbox 20 Data Length Code Register */
+#define CAN_MB20_TIMESTAMP 0xFFC02E94 /* Mailbox 20 Time Stamp Value Register */
+#define CAN_MB20_ID0 0xFFC02E98 /* Mailbox 20 Identifier Low Register */
+#define CAN_MB20_ID1 0xFFC02E9C /* Mailbox 20 Identifier High Register */
+
+#define CAN_MB21_DATA0 0xFFC02EA0 /* Mailbox 21 Data Word 0 [15:0] Register */
+#define CAN_MB21_DATA1 0xFFC02EA4 /* Mailbox 21 Data Word 1 [31:16] Register */
+#define CAN_MB21_DATA2 0xFFC02EA8 /* Mailbox 21 Data Word 2 [47:32] Register */
+#define CAN_MB21_DATA3 0xFFC02EAC /* Mailbox 21 Data Word 3 [63:48] Register */
+#define CAN_MB21_LENGTH 0xFFC02EB0 /* Mailbox 21 Data Length Code Register */
+#define CAN_MB21_TIMESTAMP 0xFFC02EB4 /* Mailbox 21 Time Stamp Value Register */
+#define CAN_MB21_ID0 0xFFC02EB8 /* Mailbox 21 Identifier Low Register */
+#define CAN_MB21_ID1 0xFFC02EBC /* Mailbox 21 Identifier High Register */
+
+#define CAN_MB22_DATA0 0xFFC02EC0 /* Mailbox 22 Data Word 0 [15:0] Register */
+#define CAN_MB22_DATA1 0xFFC02EC4 /* Mailbox 22 Data Word 1 [31:16] Register */
+#define CAN_MB22_DATA2 0xFFC02EC8 /* Mailbox 22 Data Word 2 [47:32] Register */
+#define CAN_MB22_DATA3 0xFFC02ECC /* Mailbox 22 Data Word 3 [63:48] Register */
+#define CAN_MB22_LENGTH 0xFFC02ED0 /* Mailbox 22 Data Length Code Register */
+#define CAN_MB22_TIMESTAMP 0xFFC02ED4 /* Mailbox 22 Time Stamp Value Register */
+#define CAN_MB22_ID0 0xFFC02ED8 /* Mailbox 22 Identifier Low Register */
+#define CAN_MB22_ID1 0xFFC02EDC /* Mailbox 22 Identifier High Register */
+
+#define CAN_MB23_DATA0 0xFFC02EE0 /* Mailbox 23 Data Word 0 [15:0] Register */
+#define CAN_MB23_DATA1 0xFFC02EE4 /* Mailbox 23 Data Word 1 [31:16] Register */
+#define CAN_MB23_DATA2 0xFFC02EE8 /* Mailbox 23 Data Word 2 [47:32] Register */
+#define CAN_MB23_DATA3 0xFFC02EEC /* Mailbox 23 Data Word 3 [63:48] Register */
+#define CAN_MB23_LENGTH 0xFFC02EF0 /* Mailbox 23 Data Length Code Register */
+#define CAN_MB23_TIMESTAMP 0xFFC02EF4 /* Mailbox 23 Time Stamp Value Register */
+#define CAN_MB23_ID0 0xFFC02EF8 /* Mailbox 23 Identifier Low Register */
+#define CAN_MB23_ID1 0xFFC02EFC /* Mailbox 23 Identifier High Register */
+
+#define CAN_MB24_DATA0 0xFFC02F00 /* Mailbox 24 Data Word 0 [15:0] Register */
+#define CAN_MB24_DATA1 0xFFC02F04 /* Mailbox 24 Data Word 1 [31:16] Register */
+#define CAN_MB24_DATA2 0xFFC02F08 /* Mailbox 24 Data Word 2 [47:32] Register */
+#define CAN_MB24_DATA3 0xFFC02F0C /* Mailbox 24 Data Word 3 [63:48] Register */
+#define CAN_MB24_LENGTH 0xFFC02F10 /* Mailbox 24 Data Length Code Register */
+#define CAN_MB24_TIMESTAMP 0xFFC02F14 /* Mailbox 24 Time Stamp Value Register */
+#define CAN_MB24_ID0 0xFFC02F18 /* Mailbox 24 Identifier Low Register */
+#define CAN_MB24_ID1 0xFFC02F1C /* Mailbox 24 Identifier High Register */
+
+#define CAN_MB25_DATA0 0xFFC02F20 /* Mailbox 25 Data Word 0 [15:0] Register */
+#define CAN_MB25_DATA1 0xFFC02F24 /* Mailbox 25 Data Word 1 [31:16] Register */
+#define CAN_MB25_DATA2 0xFFC02F28 /* Mailbox 25 Data Word 2 [47:32] Register */
+#define CAN_MB25_DATA3 0xFFC02F2C /* Mailbox 25 Data Word 3 [63:48] Register */
+#define CAN_MB25_LENGTH 0xFFC02F30 /* Mailbox 25 Data Length Code Register */
+#define CAN_MB25_TIMESTAMP 0xFFC02F34 /* Mailbox 25 Time Stamp Value Register */
+#define CAN_MB25_ID0 0xFFC02F38 /* Mailbox 25 Identifier Low Register */
+#define CAN_MB25_ID1 0xFFC02F3C /* Mailbox 25 Identifier High Register */
+
+#define CAN_MB26_DATA0 0xFFC02F40 /* Mailbox 26 Data Word 0 [15:0] Register */
+#define CAN_MB26_DATA1 0xFFC02F44 /* Mailbox 26 Data Word 1 [31:16] Register */
+#define CAN_MB26_DATA2 0xFFC02F48 /* Mailbox 26 Data Word 2 [47:32] Register */
+#define CAN_MB26_DATA3 0xFFC02F4C /* Mailbox 26 Data Word 3 [63:48] Register */
+#define CAN_MB26_LENGTH 0xFFC02F50 /* Mailbox 26 Data Length Code Register */
+#define CAN_MB26_TIMESTAMP 0xFFC02F54 /* Mailbox 26 Time Stamp Value Register */
+#define CAN_MB26_ID0 0xFFC02F58 /* Mailbox 26 Identifier Low Register */
+#define CAN_MB26_ID1 0xFFC02F5C /* Mailbox 26 Identifier High Register */
+
+#define CAN_MB27_DATA0 0xFFC02F60 /* Mailbox 27 Data Word 0 [15:0] Register */
+#define CAN_MB27_DATA1 0xFFC02F64 /* Mailbox 27 Data Word 1 [31:16] Register */
+#define CAN_MB27_DATA2 0xFFC02F68 /* Mailbox 27 Data Word 2 [47:32] Register */
+#define CAN_MB27_DATA3 0xFFC02F6C /* Mailbox 27 Data Word 3 [63:48] Register */
+#define CAN_MB27_LENGTH 0xFFC02F70 /* Mailbox 27 Data Length Code Register */
+#define CAN_MB27_TIMESTAMP 0xFFC02F74 /* Mailbox 27 Time Stamp Value Register */
+#define CAN_MB27_ID0 0xFFC02F78 /* Mailbox 27 Identifier Low Register */
+#define CAN_MB27_ID1 0xFFC02F7C /* Mailbox 27 Identifier High Register */
+
+#define CAN_MB28_DATA0 0xFFC02F80 /* Mailbox 28 Data Word 0 [15:0] Register */
+#define CAN_MB28_DATA1 0xFFC02F84 /* Mailbox 28 Data Word 1 [31:16] Register */
+#define CAN_MB28_DATA2 0xFFC02F88 /* Mailbox 28 Data Word 2 [47:32] Register */
+#define CAN_MB28_DATA3 0xFFC02F8C /* Mailbox 28 Data Word 3 [63:48] Register */
+#define CAN_MB28_LENGTH 0xFFC02F90 /* Mailbox 28 Data Length Code Register */
+#define CAN_MB28_TIMESTAMP 0xFFC02F94 /* Mailbox 28 Time Stamp Value Register */
+#define CAN_MB28_ID0 0xFFC02F98 /* Mailbox 28 Identifier Low Register */
+#define CAN_MB28_ID1 0xFFC02F9C /* Mailbox 28 Identifier High Register */
+
+#define CAN_MB29_DATA0 0xFFC02FA0 /* Mailbox 29 Data Word 0 [15:0] Register */
+#define CAN_MB29_DATA1 0xFFC02FA4 /* Mailbox 29 Data Word 1 [31:16] Register */
+#define CAN_MB29_DATA2 0xFFC02FA8 /* Mailbox 29 Data Word 2 [47:32] Register */
+#define CAN_MB29_DATA3 0xFFC02FAC /* Mailbox 29 Data Word 3 [63:48] Register */
+#define CAN_MB29_LENGTH 0xFFC02FB0 /* Mailbox 29 Data Length Code Register */
+#define CAN_MB29_TIMESTAMP 0xFFC02FB4 /* Mailbox 29 Time Stamp Value Register */
+#define CAN_MB29_ID0 0xFFC02FB8 /* Mailbox 29 Identifier Low Register */
+#define CAN_MB29_ID1 0xFFC02FBC /* Mailbox 29 Identifier High Register */
+
+#define CAN_MB30_DATA0 0xFFC02FC0 /* Mailbox 30 Data Word 0 [15:0] Register */
+#define CAN_MB30_DATA1 0xFFC02FC4 /* Mailbox 30 Data Word 1 [31:16] Register */
+#define CAN_MB30_DATA2 0xFFC02FC8 /* Mailbox 30 Data Word 2 [47:32] Register */
+#define CAN_MB30_DATA3 0xFFC02FCC /* Mailbox 30 Data Word 3 [63:48] Register */
+#define CAN_MB30_LENGTH 0xFFC02FD0 /* Mailbox 30 Data Length Code Register */
+#define CAN_MB30_TIMESTAMP 0xFFC02FD4 /* Mailbox 30 Time Stamp Value Register */
+#define CAN_MB30_ID0 0xFFC02FD8 /* Mailbox 30 Identifier Low Register */
+#define CAN_MB30_ID1 0xFFC02FDC /* Mailbox 30 Identifier High Register */
+
+#define CAN_MB31_DATA0 0xFFC02FE0 /* Mailbox 31 Data Word 0 [15:0] Register */
+#define CAN_MB31_DATA1 0xFFC02FE4 /* Mailbox 31 Data Word 1 [31:16] Register */
+#define CAN_MB31_DATA2 0xFFC02FE8 /* Mailbox 31 Data Word 2 [47:32] Register */
+#define CAN_MB31_DATA3 0xFFC02FEC /* Mailbox 31 Data Word 3 [63:48] Register */
+#define CAN_MB31_LENGTH 0xFFC02FF0 /* Mailbox 31 Data Length Code Register */
+#define CAN_MB31_TIMESTAMP 0xFFC02FF4 /* Mailbox 31 Time Stamp Value Register */
+#define CAN_MB31_ID0 0xFFC02FF8 /* Mailbox 31 Identifier Low Register */
+#define CAN_MB31_ID1 0xFFC02FFC /* Mailbox 31 Identifier High Register */
+
+/* CAN Mailbox Area Macros */
+#define CAN_MB_ID1(x) (CAN_MB00_ID1+((x)*0x20))
+#define CAN_MB_ID0(x) (CAN_MB00_ID0+((x)*0x20))
+#define CAN_MB_TIMESTAMP(x) (CAN_MB00_TIMESTAMP+((x)*0x20))
+#define CAN_MB_LENGTH(x) (CAN_MB00_LENGTH+((x)*0x20))
+#define CAN_MB_DATA3(x) (CAN_MB00_DATA3+((x)*0x20))
+#define CAN_MB_DATA2(x) (CAN_MB00_DATA2+((x)*0x20))
+#define CAN_MB_DATA1(x) (CAN_MB00_DATA1+((x)*0x20))
+#define CAN_MB_DATA0(x) (CAN_MB00_DATA0+((x)*0x20))
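+
+/*
+ * Usage sketch (illustrative, not part of the original register map):
+ * each mailbox occupies 0x20 bytes, so the CAN_MB_*(x) macros compute
+ * a mailbox register address from its number.  Assuming the usual
+ * bfin_read16()/bfin_write16() MMR accessors:
+ *
+ *	u16 dlc = bfin_read16(CAN_MB_LENGTH(4));  // == CAN_MB04_LENGTH
+ *	bfin_write16(CAN_MB_DATA0(4), 0x1234);    // mailbox 4, data word 0
+ */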
+
+
+/********************************************************************************** */
+/* System MMR Register Bits and Macros */
+/********************************************************************************** */
+
+/* SWRST Mask */
+#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
+#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
+#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
+#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
+#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
+
+/* SYSCR Masks */
+#define BMODE 0x0006 /* Boot Mode - Latched During HW Reset From Mode Pins */
+#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
+
+
+/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
+
+/* Peripheral Masks For SIC0_ISR, SIC0_IWR, SIC0_IMASK */
+#define PLL_WAKEUP_IRQ 0x00000001 /* PLL Wakeup Interrupt Request */
+#define DMAC0_ERR_IRQ 0x00000002 /* DMA Controller 0 Error Interrupt Request */
+#define PPI_ERR_IRQ 0x00000004 /* PPI Error Interrupt Request */
+#define SPORT0_ERR_IRQ 0x00000008 /* SPORT0 Error Interrupt Request */
+#define SPORT1_ERR_IRQ 0x00000010 /* SPORT1 Error Interrupt Request */
+#define SPI0_ERR_IRQ 0x00000020 /* SPI0 Error Interrupt Request */
+#define UART0_ERR_IRQ 0x00000040 /* UART0 Error Interrupt Request */
+#define RTC_IRQ 0x00000080 /* Real-Time Clock Interrupt Request */
+#define DMA0_IRQ 0x00000100 /* DMA Channel 0 (PPI) Interrupt Request */
+#define DMA1_IRQ 0x00000200 /* DMA Channel 1 (SPORT0 RX) Interrupt Request */
+#define DMA2_IRQ 0x00000400 /* DMA Channel 2 (SPORT0 TX) Interrupt Request */
+#define DMA3_IRQ 0x00000800 /* DMA Channel 3 (SPORT1 RX) Interrupt Request */
+#define DMA4_IRQ 0x00001000 /* DMA Channel 4 (SPORT1 TX) Interrupt Request */
+#define DMA5_IRQ 0x00002000 /* DMA Channel 5 (SPI) Interrupt Request */
+#define DMA6_IRQ 0x00004000 /* DMA Channel 6 (UART RX) Interrupt Request */
+#define DMA7_IRQ 0x00008000 /* DMA Channel 7 (UART TX) Interrupt Request */
+#define TIMER0_IRQ 0x00010000 /* Timer 0 Interrupt Request */
+#define TIMER1_IRQ 0x00020000 /* Timer 1 Interrupt Request */
+#define TIMER2_IRQ 0x00040000 /* Timer 2 Interrupt Request */
+#define PFA_IRQ 0x00080000 /* Programmable Flag Interrupt Request A */
+#define PFB_IRQ 0x00100000 /* Programmable Flag Interrupt Request B */
+#define MDMA0_0_IRQ 0x00200000 /* MemDMA0 Stream 0 Interrupt Request */
+#define MDMA0_1_IRQ 0x00400000 /* MemDMA0 Stream 1 Interrupt Request */
+#define WDOG_IRQ 0x00800000 /* Software Watchdog Timer Interrupt Request */
+#define DMAC1_ERR_IRQ 0x01000000 /* DMA Controller 1 Error Interrupt Request */
+#define SPORT2_ERR_IRQ 0x02000000 /* SPORT2 Error Interrupt Request */
+#define SPORT3_ERR_IRQ 0x04000000 /* SPORT3 Error Interrupt Request */
+#define MXVR_SD_IRQ 0x08000000 /* MXVR Synchronous Data Interrupt Request */
+#define SPI1_ERR_IRQ 0x10000000 /* SPI1 Error Interrupt Request */
+#define SPI2_ERR_IRQ 0x20000000 /* SPI2 Error Interrupt Request */
+#define UART1_ERR_IRQ 0x40000000 /* UART1 Error Interrupt Request */
+#define UART2_ERR_IRQ 0x80000000 /* UART2 Error Interrupt Request */
+
+/* the following are for backwards compatibility */
+#define DMA0_ERR_IRQ DMAC0_ERR_IRQ
+#define DMA1_ERR_IRQ DMAC1_ERR_IRQ
+
+
+/* Peripheral Masks For SIC_ISR1, SIC_IWR1, SIC_IMASK1 */
+#define CAN_ERR_IRQ 0x00000001 /* CAN Error Interrupt Request */
+#define DMA8_IRQ 0x00000002 /* DMA Channel 8 (SPORT2 RX) Interrupt Request */
+#define DMA9_IRQ 0x00000004 /* DMA Channel 9 (SPORT2 TX) Interrupt Request */
+#define DMA10_IRQ 0x00000008 /* DMA Channel 10 (SPORT3 RX) Interrupt Request */
+#define DMA11_IRQ 0x00000010 /* DMA Channel 11 (SPORT3 TX) Interrupt Request */
+#define DMA12_IRQ 0x00000020 /* DMA Channel 12 Interrupt Request */
+#define DMA13_IRQ 0x00000040 /* DMA Channel 13 Interrupt Request */
+#define DMA14_IRQ 0x00000080 /* DMA Channel 14 (SPI1) Interrupt Request */
+#define DMA15_IRQ 0x00000100 /* DMA Channel 15 (SPI2) Interrupt Request */
+#define DMA16_IRQ 0x00000200 /* DMA Channel 16 (UART1 RX) Interrupt Request */
+#define DMA17_IRQ 0x00000400 /* DMA Channel 17 (UART1 TX) Interrupt Request */
+#define DMA18_IRQ 0x00000800 /* DMA Channel 18 (UART2 RX) Interrupt Request */
+#define DMA19_IRQ 0x00001000 /* DMA Channel 19 (UART2 TX) Interrupt Request */
+#define TWI0_IRQ 0x00002000 /* TWI0 Interrupt Request */
+#define TWI1_IRQ 0x00004000 /* TWI1 Interrupt Request */
+#define CAN_RX_IRQ 0x00008000 /* CAN Receive Interrupt Request */
+#define CAN_TX_IRQ 0x00010000 /* CAN Transmit Interrupt Request */
+#define MDMA1_0_IRQ 0x00020000 /* MemDMA1 Stream 0 Interrupt Request */
+#define MDMA1_1_IRQ 0x00040000 /* MemDMA1 Stream 1 Interrupt Request */
+#define MXVR_STAT_IRQ 0x00080000 /* MXVR Status Interrupt Request */
+#define MXVR_CM_IRQ 0x00100000 /* MXVR Control Message Interrupt Request */
+#define MXVR_AP_IRQ 0x00200000 /* MXVR Asynchronous Packet Interrupt */
+
+/* the following are for backwards compatibility */
+#define MDMA0_IRQ MDMA1_0_IRQ
+#define MDMA1_IRQ MDMA1_1_IRQ
+
+#ifdef _MISRA_RULES
+#define _MF15 0xFu
+#define _MF7 7u
+#else
+#define _MF15 0xF
+#define _MF7 7
+#endif /* _MISRA_RULES */
+
+/* SIC_IMASKx Masks */
+#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
+#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
+#ifdef _MISRA_RULES
+#define SIC_MASK(x) (1 << ((x)&0x1Fu)) /* Mask Peripheral #x interrupt */
+#define SIC_UNMASK(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Unmask Peripheral #x interrupt */
+#else
+#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
+#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
+#endif /* _MISRA_RULES */
+
+/* SIC_IWRx Masks */
+#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
+#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
+#ifdef _MISRA_RULES
+#define IWR_ENABLE(x) (1 << ((x)&0x1Fu)) /* Wakeup Enable Peripheral #x */
+#define IWR_DISABLE(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Wakeup Disable Peripheral #x */
+#else
+#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
+#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
+#endif /* _MISRA_RULES */
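+
+/*
+ * Usage sketch (illustrative): SIC_MASK()/SIC_UNMASK() and
+ * IWR_ENABLE()/IWR_DISABLE() build single-bit masks from a peripheral
+ * number taken modulo 32; the caller picks the SIC_IMASKx/SIC_IWRx
+ * register that holds that peripheral.  Following the comments above,
+ * and assuming the bfin_read32()/bfin_write32() accessors:
+ *
+ *	// mask peripheral #6 (UART0 error, per the SIC0 list above)
+ *	bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(6));
+ *	// allow the same peripheral to wake the core
+ *	bfin_write32(SIC_IWR0, bfin_read32(SIC_IWR0) | IWR_ENABLE(6));
+ */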
+
+/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
+/* PPI_CONTROL Masks */
+#define PORT_EN 0x0001 /* PPI Port Enable */
+#define PORT_DIR 0x0002 /* PPI Port Direction */
+#define XFR_TYPE 0x000C /* PPI Transfer Type */
+#define PORT_CFG 0x0030 /* PPI Port Configuration */
+#define FLD_SEL 0x0040 /* PPI Active Field Select */
+#define PACK_EN 0x0080 /* PPI Packing Mode */
+/* previous versions of defBF539.h erroneously included DMA32 (PPI 32-bit DMA Enable) */
+#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
+#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
+#define DLENGTH 0x3800 /* PPI Data Length */
+#define DLEN_8 0x0000 /* Data Length = 8 Bits */
+#define DLEN_10 0x0800 /* Data Length = 10 Bits */
+#define DLEN_11 0x1000 /* Data Length = 11 Bits */
+#define DLEN_12 0x1800 /* Data Length = 12 Bits */
+#define DLEN_13 0x2000 /* Data Length = 13 Bits */
+#define DLEN_14 0x2800 /* Data Length = 14 Bits */
+#define DLEN_15 0x3000 /* Data Length = 15 Bits */
+#define DLEN_16 0x3800 /* Data Length = 16 Bits */
+#ifdef _MISRA_RULES
+#define DLEN(x) ((((x)-9u) & 0x07u) << 11) /* PPI Data Length (only works for x=10-->x=16) */
+#else
+#define DLEN(x) ((((x)-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */
+#endif /* _MISRA_RULES */
+#define POL 0xC000 /* PPI Signal Polarities */
+#define POLC 0x4000 /* PPI Clock Polarity */
+#define POLS 0x8000 /* PPI Frame Sync Polarity */
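+
+/*
+ * Usage sketch (illustrative): a PPI_CONTROL value is composed by
+ * OR-ing the masks above; DLEN(x) only encodes lengths 10 through 16,
+ * so DLEN(12) == DLEN_12.  Assuming bfin_write16() and the PPI_CONTROL
+ * MMR address defined elsewhere in this file:
+ *
+ *	// 12-bit data, packing enabled, port enabled
+ *	bfin_write16(PPI_CONTROL, PACK_EN | DLEN(12) | PORT_EN);
+ */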
+
+
+/* PPI_STATUS Masks */
+#define FLD 0x0400 /* Field Indicator */
+#define FT_ERR 0x0800 /* Frame Track Error */
+#define OVR 0x1000 /* FIFO Overflow Error */
+#define UNDR 0x2000 /* FIFO Underrun Error */
+#define ERR_DET 0x4000 /* Error Detected Indicator */
+#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
+
+
+/* ********** DMA CONTROLLER MASKS ***********************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
+
+#define CTYPE 0x0040 /* DMA Channel Type Indicator */
+#define CTYPE_P 0x6 /* DMA Channel Type Indicator BIT POSITION */
+#define PCAP8 0x0080 /* DMA 8-bit Operation Indicator */
+#define PCAP16 0x0100 /* DMA 16-bit Operation Indicator */
+#define PCAP32 0x0200 /* DMA 32-bit Operation Indicator */
+#define PCAPWR 0x0400 /* DMA Write Operation Indicator */
+#define PCAPRD 0x0800 /* DMA Read Operation Indicator */
+#define PMAP 0xF000 /* DMA Peripheral Map Field */
+
+/* PMAP Encodings For DMA Controller 0 */
+#define PMAP_PPI 0x0000 /* PMAP PPI Port DMA */
+#define PMAP_SPORT0RX 0x1000 /* PMAP SPORT0 Receive DMA */
+#define PMAP_SPORT0TX 0x2000 /* PMAP SPORT0 Transmit DMA */
+#define PMAP_SPORT1RX 0x3000 /* PMAP SPORT1 Receive DMA */
+#define PMAP_SPORT1TX 0x4000 /* PMAP SPORT1 Transmit DMA */
+#define PMAP_SPI0 0x5000 /* PMAP SPI0 DMA */
+#define PMAP_UART0RX 0x6000 /* PMAP UART0 Receive DMA */
+#define PMAP_UART0TX 0x7000 /* PMAP UART0 Transmit DMA */
+
+/* PMAP Encodings For DMA Controller 1 */
+#define PMAP_SPORT2RX 0x0000 /* PMAP SPORT2 Receive DMA */
+#define PMAP_SPORT2TX 0x1000 /* PMAP SPORT2 Transmit DMA */
+#define PMAP_SPORT3RX 0x2000 /* PMAP SPORT3 Receive DMA */
+#define PMAP_SPORT3TX 0x3000 /* PMAP SPORT3 Transmit DMA */
+#define PMAP_SPI1 0x6000 /* PMAP SPI1 DMA */
+#define PMAP_SPI2 0x7000 /* PMAP SPI2 DMA */
+#define PMAP_UART1RX 0x8000 /* PMAP UART1 Receive DMA */
+#define PMAP_UART1TX 0x9000 /* PMAP UART1 Transmit DMA */
+#define PMAP_UART2RX 0xA000 /* PMAP UART2 Receive DMA */
+#define PMAP_UART2TX 0xB000 /* PMAP UART2 Transmit DMA */
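+
+/*
+ * Usage sketch (illustrative): the PMAP field of a channel's
+ * peripheral map register identifies which peripheral the channel
+ * currently serves.  Assuming bfin_read16() and the DMAx_PERIPHERAL_MAP
+ * addresses defined elsewhere in this file:
+ *
+ *	if ((bfin_read16(DMA6_PERIPHERAL_MAP) & PMAP) == PMAP_UART0RX)
+ *		;  // channel 6 is mapped to UART0 receive, as expected
+ */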
+
+
+/* ************* GENERAL PURPOSE TIMER MASKS ******************** */
+/* PWM Timer bit definitions */
+/* TIMER_ENABLE Register */
+#define TIMEN0 0x0001 /* Enable Timer 0 */
+#define TIMEN1 0x0002 /* Enable Timer 1 */
+#define TIMEN2 0x0004 /* Enable Timer 2 */
+
+#define TIMEN0_P 0x00
+#define TIMEN1_P 0x01
+#define TIMEN2_P 0x02
+
+/* TIMER_DISABLE Register */
+#define TIMDIS0 0x0001 /* Disable Timer 0 */
+#define TIMDIS1 0x0002 /* Disable Timer 1 */
+#define TIMDIS2 0x0004 /* Disable Timer 2 */
+
+#define TIMDIS0_P 0x00
+#define TIMDIS1_P 0x01
+#define TIMDIS2_P 0x02
+
+/* TIMER_STATUS Register */
+#define TIMIL0 0x0001 /* Timer 0 Interrupt */
+#define TIMIL1 0x0002 /* Timer 1 Interrupt */
+#define TIMIL2 0x0004 /* Timer 2 Interrupt */
+#define TOVF_ERR0 0x0010 /* Timer 0 Counter Overflow */
+#define TOVF_ERR1 0x0020 /* Timer 1 Counter Overflow */
+#define TOVF_ERR2 0x0040 /* Timer 2 Counter Overflow */
+#define TRUN0 0x1000 /* Timer 0 Slave Enable Status */
+#define TRUN1 0x2000 /* Timer 1 Slave Enable Status */
+#define TRUN2 0x4000 /* Timer 2 Slave Enable Status */
+
+#define TIMIL0_P 0x00
+#define TIMIL1_P 0x01
+#define TIMIL2_P 0x02
+#define TOVF_ERR0_P 0x04
+#define TOVF_ERR1_P 0x05
+#define TOVF_ERR2_P 0x06
+#define TRUN0_P 0x0C
+#define TRUN1_P 0x0D
+#define TRUN2_P 0x0E
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR0_P TOVF_ERR0_P
+#define TOVL_ERR1_P TOVF_ERR1_P
+#define TOVL_ERR2_P TOVF_ERR2_P
+
+/* TIMERx_CONFIG Registers */
+#define PWM_OUT 0x0001
+#define WDTH_CAP 0x0002
+#define EXT_CLK 0x0003
+#define PULSE_HI 0x0004
+#define PERIOD_CNT 0x0008
+#define IRQ_ENA 0x0010
+#define TIN_SEL 0x0020
+#define OUT_DIS 0x0040
+#define CLK_SEL 0x0080
+#define TOGGLE_HI 0x0100
+#define EMU_RUN 0x0200
+#ifdef _MISRA_RULES
+#define ERR_TYP(x) (((x) & 0x03u) << 14)
+#else
+#define ERR_TYP(x) (((x) & 0x03) << 14)
+#endif /* _MISRA_RULES */
+
+#define TMODE_P0 0x00
+#define TMODE_P1 0x01
+#define PULSE_HI_P 0x02
+#define PERIOD_CNT_P 0x03
+#define IRQ_ENA_P 0x04
+#define TIN_SEL_P 0x05
+#define OUT_DIS_P 0x06
+#define CLK_SEL_P 0x07
+#define TOGGLE_HI_P 0x08
+#define EMU_RUN_P 0x09
+#define ERR_TYP_P0 0x0E
+#define ERR_TYP_P1 0x0F
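+
+/*
+ * Usage sketch (illustrative): a TIMERx_CONFIG value is composed from
+ * the masks above.  Assuming bfin_write16() and the TIMERx_CONFIG /
+ * TIMER_ENABLE addresses defined elsewhere in this file:
+ *
+ *	// PWM output, count to the period value, interrupt on rollover
+ *	bfin_write16(TIMER0_CONFIG, PWM_OUT | PERIOD_CNT | IRQ_ENA);
+ *	bfin_write16(TIMER_ENABLE, TIMEN0);
+ */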
+
+/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
+/* EBIU_AMGCTL Masks */
+#define AMCKEN 0x0001 /* Enable CLKOUT */
+#define AMBEN_NONE 0x0000 /* All Banks Disabled */
+#define AMBEN_B0 0x0002 /* Enable Asynchronous Memory Bank 0 only */
+#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */
+#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */
+#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */
+#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */
+
+/* EBIU_AMGCTL Bit Positions */
+#define AMCKEN_P 0x0000 /* Enable CLKOUT */
+#define AMBEN_P0 0x0001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
+#define AMBEN_P1 0x0002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0-3 enabled */
+#define AMBEN_P2 0x0003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */
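+
+/*
+ * Usage sketch (illustrative): enable CLKOUT and all four asynchronous
+ * banks.  Assuming bfin_write16() and the EBIU_AMGCTL address defined
+ * elsewhere in this file:
+ *
+ *	bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_ALL);
+ */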
+
+/* EBIU_AMBCTL0 Masks */
+#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */
+#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */
+#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */
+#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */
+#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */
+#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */
+#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
+#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
+#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
+#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
+#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */
+#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */
+#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */
+#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */
+#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */
+#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */
+#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */
+#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */
+#define B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */
+#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */
+#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */
+#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */
+#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */
+#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */
+#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */
+#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */
+#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */
+#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */
+#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */
+#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */
+#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */
+#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */
+#define B0WAT_4 0x00004000 /* Bank 0 Write Access Time = 4 cycles */
+#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */
+#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */
+#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */
+#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */
+#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */
+#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */
+#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */
+#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */
+#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */
+#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */
+#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */
+#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */
+#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */
+#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */
+#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */
+#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */
+#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */
+#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
+#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
+#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
+#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
+#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */
+#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */
+#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */
+#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */
+#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */
+#define B1RAT_6 0x06000000 /* Bank 1 Read Access Time = 6 cycles */
+#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */
+#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */
+#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */
+#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */
+#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */
+#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */
+#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */
+#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */
+#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */
+#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */
+#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */
+#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */
+#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */
+#define B1WAT_5 0x50000000 /* Bank 1 Write Access Time = 5 cycles */
+#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */
+#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */
+#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */
+#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */
+#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */
+#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */
+#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */
+#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */
+#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */
+#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */
+
+/* EBIU_AMBCTL1 Masks */
+#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */
+#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */
+#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */
+#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */
+#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */
+#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */
+#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
+#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
+#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
+#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
+#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */
+#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */
+#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */
+#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */
+#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */
+#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */
+#define B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */
+#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */
+#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */
+#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */
+#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */
+#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */
+#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */
+#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */
+#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */
+#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */
+#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */
+#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */
+#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */
+#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */
+#define B2WAT_6 0x00006000 /* Bank 2 Write Access Time = 6 cycles */
+#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */
+#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */
+#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */
+#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */
+#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */
+#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */
+#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */
+#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */
+#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */
+#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */
+#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */
+#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */
+#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */
+#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */
+#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */
+#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
+#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
+#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
+#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
+#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */
+#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */
+#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */
+#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */
+#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */
+#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */
+#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */
+#define B3RAT_8 0x08000000 /* Bank 3 Read Access Time = 8 cycles */
+#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */
+#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */
+#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */
+#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */
+#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */
+#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */
+#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */
+#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */
+#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */
+#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */
+#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */
+#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */
+#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */
+#define B3WAT_7 0x70000000 /* Bank 3 Write Access Time = 7 cycles */
+#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */
+#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */
+#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */
+#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */
+#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */
+#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */
+#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */
+#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */
+
+/* ********************** SDRAM CONTROLLER MASKS *************************** */
+/* EBIU_SDGCTL Masks */
+#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
+#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */
+#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */
+#define PFE 0x00000010 /* Enable SDRAM prefetch */
+#define PFP 0x00000020 /* Prefetch has priority over AMC requests */
+#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
+#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
+#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
+#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
+#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
+#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
+#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
+#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
+#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
+#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
+#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
+#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
+#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
+#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
+#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
+#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
+#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
+#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
+#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
+#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
+#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
+#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
+#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
+#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
+#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
+#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
+#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
+#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
+#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
+#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
+#define PUPSD 0x00200000 /* Power-up start delay */
+#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */
+#define PSS 0x00800000 /* enable SDRAM power-up sequence on next SDRAM access */
+#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */
+#define EBUFE 0x02000000 /* Enable external buffering timing */
+#define FBBRW 0x04000000 /* Fast back-to-back read write enable */
+#define EMREN 0x10000000 /* Extended mode register enable */
+#define TCSR 0x20000000 /* Temp compensated self refresh value 85 deg C */
+#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */
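+
+/*
+ * Usage sketch (illustrative): an EBIU_SDGCTL value is built by OR-ing
+ * one setting from each timing group above.  The cycle counts below are
+ * placeholders, not a recommendation for any particular SDRAM device.
+ * Assuming bfin_write32() and the EBIU_SDGCTL address defined elsewhere
+ * in this file:
+ *
+ *	bfin_write32(EBIU_SDGCTL,
+ *		     SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2);
+ */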
+
+/* EBIU_SDBCTL Masks */
+#define EBE 0x00000001 /* Enable SDRAM external bank */
+#define EBSZ_16 0x00000000 /* SDRAM external bank size = 16MB */
+#define EBSZ_32 0x00000002 /* SDRAM external bank size = 32MB */
+#define EBSZ_64 0x00000004 /* SDRAM external bank size = 64MB */
+#define EBSZ_128 0x00000006 /* SDRAM external bank size = 128MB */
+#define EBSZ_256 0x00000008 /* SDRAM External Bank Size = 256MB */
+#define EBSZ_512 0x0000000A /* SDRAM External Bank Size = 512MB */
+#define EBCAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define EBCAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */
+#define EBCAW_10 0x00000020 /* SDRAM external bank column address width = 10 bits */
+#define EBCAW_11 0x00000030 /* SDRAM external bank column address width = 11 bits */
+
+/* EBIU_SDSTAT Masks */
+#define SDCI 0x00000001 /* SDRAM controller is idle */
+#define SDSRA 0x00000002 /* SDRAM self-refresh is active */
+#define SDPUA 0x00000004 /* SDRAM power up active */
+#define SDRS 0x00000008 /* SDRAM is in reset state */
+#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */
+#define BGSTAT 0x00000020 /* Bus granted */
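+
+/*
+ * Usage sketch (illustrative): SDEASE is documented above as sticky and
+ * write-1-to-clear (W1C).  Assuming the 16-bit bfin_read16()/
+ * bfin_write16() accessors and the EBIU_SDSTAT address defined
+ * elsewhere in this file:
+ *
+ *	if (bfin_read16(EBIU_SDSTAT) & SDEASE)
+ *		bfin_write16(EBIU_SDSTAT, SDEASE);  // acknowledge the error
+ */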
+
+
+/* ******************** TWO-WIRE INTERFACE (TWIx) MASKS ***********************/
+/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
+#ifdef _MISRA_RULES
+#define CLKLOW(x) ((x) & 0xFFu) /* Periods Clock Is Held Low */
+#define CLKHI(y) (((y)&0xFFu)<<0x8) /* Periods Before New Clock Low */
+#else
+#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
+#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
+#endif /* _MISRA_RULES */
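+
+/*
+ * Usage sketch (illustrative), following the usage comment above: the
+ * SCL period is (CLKLOW + CLKHI) cycles of the internal 10MHz time
+ * reference (see the PRESCALE comment below), so 100kHz with a roughly
+ * 50% duty cycle needs 50 low plus 50 high periods.  Assuming
+ * bfin_write16() and a TWI0_CLKDIV address defined elsewhere:
+ *
+ *	bfin_write16(TWI0_CLKDIV, CLKLOW(50) | CLKHI(50));
+ */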
+
+/* TWIx_PRESCALE Masks */
+#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
+#define TWI_ENA 0x0080 /* TWI Enable */
+#define SCCB 0x0200 /* SCCB Compatibility Enable */
+
+/* TWIx_SLAVE_CTRL Masks */
+#define SEN 0x0001 /* Slave Enable */
+#define SADD_LEN 0x0002 /* Slave Address Length */
+#define STDVAL 0x0004 /* Slave Transmit Data Valid */
+#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
+#define GEN 0x0010 /* General Call Address Matching Enabled */
+
+/* TWIx_SLAVE_STAT Masks */
+#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
+#define GCALL 0x0002 /* General Call Indicator */
+
+/* TWIx_MASTER_CTRL Masks */
+#define MEN 0x0001 /* Master Mode Enable */
+#define MADD_LEN 0x0002 /* Master Address Length */
+#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
+#define FAST 0x0008 /* Use Fast Mode Timing Specs */
+#define STOP 0x0010 /* Issue Stop Condition */
+#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
+#define DCNT 0x3FC0 /* Data Bytes To Transfer */
+#define SDAOVR 0x4000 /* Serial Data Override */
+#define SCLOVR 0x8000 /* Serial Clock Override */
+
+/* TWIx_MASTER_STAT Masks */
+#define MPROG 0x0001 /* Master Transfer In Progress */
+#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
+#define ANAK 0x0004 /* Address Not Acknowledged */
+#define DNAK 0x0008 /* Data Not Acknowledged */
+#define BUFRDERR 0x0010 /* Buffer Read Error */
+#define BUFWRERR 0x0020 /* Buffer Write Error */
+#define SDASEN 0x0040 /* Serial Data Sense */
+#define SCLSEN 0x0080 /* Serial Clock Sense */
+#define BUSBUSY 0x0100 /* Bus Busy Indicator */
+
+/* TWIx_INT_SRC and TWIx_INT_ENABLE Masks */
+#define SINIT 0x0001 /* Slave Transfer Initiated */
+#define SCOMP 0x0002 /* Slave Transfer Complete */
+#define SERR 0x0004 /* Slave Transfer Error */
+#define SOVF 0x0008 /* Slave Overflow */
+#define MCOMP 0x0010 /* Master Transfer Complete */
+#define MERR 0x0020 /* Master Transfer Error */
+#define XMTSERV 0x0040 /* Transmit FIFO Service */
+#define RCVSERV 0x0080 /* Receive FIFO Service */
+
+/* TWIx_FIFO_CTL Masks */
+#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
+#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
+#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
+#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
+
+/* TWIx_FIFO_STAT Masks */
+#define XMTSTAT 0x0003 /* Transmit FIFO Status */
+#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
+#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
+#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
+
+#define RCVSTAT 0x000C /* Receive FIFO Status */
+#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
+#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
+#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 7a8ac5f4420..8100bcd01a0 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1,859 +1,13 @@
/*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
-/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF538/9 */
-
#ifndef _DEF_BF539_H
#define _DEF_BF539_H
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-
-/*********************************************************************************** */
-/* System MMR Register Map */
-/*********************************************************************************** */
-/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
-#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */
-#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */
-#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */
-#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */
-#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
-#define CHIPID 0xFFC00014 /* Chip ID Register */
-
-/* CHIPID Masks */
-#define CHIPID_VERSION 0xF0000000
-#define CHIPID_FAMILY 0x0FFFF000
-#define CHIPID_MANUFACTURE 0x00000FFE
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define SWRST 0xFFC00100 /* Software Reset Register (16-bit) */
-#define SYSCR 0xFFC00104 /* System Configuration registe */
-#define SIC_RVECT 0xFFC00108
-#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
-#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
-#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
-#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
-#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
-#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
-#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
-#define SIC_IMASK1 0xFFC00128 /* Interrupt Mask Register 1 */
-#define SIC_ISR1 0xFFC0012C /* Interrupt Status Register 1 */
-#define SIC_IWR1 0xFFC00130 /* Interrupt Wakeup Register 1 */
-#define SIC_IAR4 0xFFC00134 /* Interrupt Assignment Register 4 */
-#define SIC_IAR5 0xFFC00138 /* Interrupt Assignment Register 5 */
-#define SIC_IAR6 0xFFC0013C /* Interrupt Assignment Register 6 */
-
-
-/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
-#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
-#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
-#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
-
-
-/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define RTC_STAT 0xFFC00300 /* RTC Status Register */
-#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
-#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
-#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
-#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
-#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
-#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Register (alternate macro) */
-
-
-/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
-#define UART0_THR 0xFFC00400 /* Transmit Holding register */
-#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
-#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
-#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
-#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
-#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
-#define UART0_LCR 0xFFC0040C /* Line Control Register */
-#define UART0_MCR 0xFFC00410 /* Modem Control Register */
-#define UART0_LSR 0xFFC00414 /* Line Status Register */
-#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
-#define UART0_GCTL 0xFFC00424 /* Global Control Register */
-
-
-/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
-
-#define SPI0_CTL 0xFFC00500 /* SPI0 Control Register */
-#define SPI0_FLG 0xFFC00504 /* SPI0 Flag register */
-#define SPI0_STAT 0xFFC00508 /* SPI0 Status register */
-#define SPI0_TDBR 0xFFC0050C /* SPI0 Transmit Data Buffer Register */
-#define SPI0_RDBR 0xFFC00510 /* SPI0 Receive Data Buffer Register */
-#define SPI0_BAUD 0xFFC00514 /* SPI0 Baud rate Register */
-#define SPI0_SHADOW 0xFFC00518 /* SPI0_RDBR Shadow Register */
-#define SPI0_REGBASE SPI0_CTL
-
-
-/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
-#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
-#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
-#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
-#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
-
-#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
-#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
-#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
-#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
-
-#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
-#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
-#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
-#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
-
-#define TIMER_ENABLE 0xFFC00640 /* Timer Enable Register */
-#define TIMER_DISABLE 0xFFC00644 /* Timer Disable Register */
-#define TIMER_STATUS 0xFFC00648 /* Timer Status Register */
-
-
-/* Programmable Flags (0xFFC00700 - 0xFFC007FF) */
-#define FIO_FLAG_D 0xFFC00700 /* Flag Mask to directly specify state of pins */
-#define FIO_FLAG_C 0xFFC00704 /* Flag Mask to directly clear state of pins */
-#define FIO_FLAG_S 0xFFC00708 /* Flag Mask to directly set state of pins */
-#define FIO_FLAG_T 0xFFC0070C /* Flag Mask to directly toggle state of pins */
-#define FIO_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Register (set directly) */
-#define FIO_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Register (clear) */
-#define FIO_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Register (set) */
-#define FIO_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Register (toggle) */
-#define FIO_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Register (set directly) */
-#define FIO_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Register (clear) */
-#define FIO_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Register (set) */
-#define FIO_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Register (toggle) */
-#define FIO_DIR 0xFFC00730 /* Peripheral Flag Direction Register */
-#define FIO_POLAR 0xFFC00734 /* Flag Source Polarity Register */
-#define FIO_EDGE 0xFFC00738 /* Flag Source Sensitivity Register */
-#define FIO_BOTH 0xFFC0073C /* Flag Set on BOTH Edges Register */
-#define FIO_INEN 0xFFC00740 /* Flag Input Enable Register */
-
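/*
 * The _S/_C/_T variants make pin updates single-write operations: writing
 * a 1 to a bit position sets/clears/toggles just that flag, avoiding
 * read-modify-write races on shared ports. A minimal sketch pulsing PF3:
 */
#define MMR16(a) (*(volatile unsigned short *)(a))
#define PF3 (1 << 3)

static void pf3_pulse(void)
{
	MMR16(FIO_INEN)  &= ~PF3;    /* not an input */
	MMR16(FIO_DIR)   |=  PF3;    /* drive as output */
	MMR16(FIO_FLAG_S) =  PF3;    /* set: only bit 3 affected */
	MMR16(FIO_FLAG_C) =  PF3;    /* clear: only bit 3 affected */
}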
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
-#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
-#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
-#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
-#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
-#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
-#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
-#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
-#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
-#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
-#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
-#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
-#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
-#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
-#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
-#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
-#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
-#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
-#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
-#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
-#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
-#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
-#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
-#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
-#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
-#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
-#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
-#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
-#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
-#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
-#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
-#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
-#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
-#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
-#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
-#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
-#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
-#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
-#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
-#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
-#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-/* Asynchronous Memory Controller */
-#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
-#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
-#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
-
-/* SDRAM Controller */
-#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
-#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
-#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
-#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
-
-
-
-/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
-
-#define DMAC0_TC_PER 0xFFC00B0C /* DMA Controller 0 Traffic Control Periods Register */
-#define DMAC0_TC_CNT 0xFFC00B10 /* DMA Controller 0 Traffic Control Current Counts Register */
-
-/* Alternate (deprecated) register names, provided for backwards compatibility */
-#define DMA0_TCPER DMAC0_TC_PER
-#define DMA0_TCCNT DMAC0_TC_CNT
-
-
-/* DMA Controller 0 (0xFFC00C00 - 0xFFC00FFF) */
-
-#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
-#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
-#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
-#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
-#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
-#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
-#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
-#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
-#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
-#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
-#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
-#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
-#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
-
-#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
-#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
-#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
-#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
-#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
-#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
-#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
-#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
-#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
-#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
-#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
-#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
-#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
-
-#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
-#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
-#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
-#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
-#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
-#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
-#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
-#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
-#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
-#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
-#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
-#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
-#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
-
-#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
-#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
-#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
-#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
-#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
-#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
-#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
-#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
-#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
-#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
-#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
-#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
-#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
-
-#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
-#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
-#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
-#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
-#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
-#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
-#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
-#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
-#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
-#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
-#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
-#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
-#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
-
-#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
-#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
-#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
-#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
-#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
-#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
-#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
-#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
-#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
-#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
-#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
-#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
-#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
-
-#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
-#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
-#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
-#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
-#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
-#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
-#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
-#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
-#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
-#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
-#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
-#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
-#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
-
-#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
-#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
-#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
-#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
-#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
-#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
-#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
-#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
-#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
-#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
-#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
-#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
-#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
-
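/*
 * Each channel's register bank above occupies 0x40 bytes, so any
 * channel-relative address can be computed instead of spelled out. A
 * sketch; the macro name is illustrative, not part of this header:
 */
#define DMA0_CH_REG(ch, off) (DMA0_NEXT_DESC_PTR + (ch) * 0x40 + (off))
/* e.g. DMA0_CH_REG(3, 0x08) == DMA3_CONFIG == 0xFFC00CC8 */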
-#define MDMA0_D0_NEXT_DESC_PTR 0xFFC00E00 /* MemDMA0 Stream 0 Destination Next Descriptor Pointer Register */
-#define MDMA0_D0_START_ADDR 0xFFC00E04 /* MemDMA0 Stream 0 Destination Start Address Register */
-#define MDMA0_D0_CONFIG 0xFFC00E08 /* MemDMA0 Stream 0 Destination Configuration Register */
-#define MDMA0_D0_X_COUNT 0xFFC00E10 /* MemDMA0 Stream 0 Destination X Count Register */
-#define MDMA0_D0_X_MODIFY 0xFFC00E14 /* MemDMA0 Stream 0 Destination X Modify Register */
-#define MDMA0_D0_Y_COUNT 0xFFC00E18 /* MemDMA0 Stream 0 Destination Y Count Register */
-#define MDMA0_D0_Y_MODIFY 0xFFC00E1C /* MemDMA0 Stream 0 Destination Y Modify Register */
-#define MDMA0_D0_CURR_DESC_PTR 0xFFC00E20 /* MemDMA0 Stream 0 Destination Current Descriptor Pointer Register */
-#define MDMA0_D0_CURR_ADDR 0xFFC00E24 /* MemDMA0 Stream 0 Destination Current Address Register */
-#define MDMA0_D0_IRQ_STATUS 0xFFC00E28 /* MemDMA0 Stream 0 Destination Interrupt/Status Register */
-#define MDMA0_D0_PERIPHERAL_MAP 0xFFC00E2C /* MemDMA0 Stream 0 Destination Peripheral Map Register */
-#define MDMA0_D0_CURR_X_COUNT 0xFFC00E30 /* MemDMA0 Stream 0 Destination Current X Count Register */
-#define MDMA0_D0_CURR_Y_COUNT 0xFFC00E38 /* MemDMA0 Stream 0 Destination Current Y Count Register */
-
-#define MDMA0_S0_NEXT_DESC_PTR 0xFFC00E40 /* MemDMA0 Stream 0 Source Next Descriptor Pointer Register */
-#define MDMA0_S0_START_ADDR 0xFFC00E44 /* MemDMA0 Stream 0 Source Start Address Register */
-#define MDMA0_S0_CONFIG 0xFFC00E48 /* MemDMA0 Stream 0 Source Configuration Register */
-#define MDMA0_S0_X_COUNT 0xFFC00E50 /* MemDMA0 Stream 0 Source X Count Register */
-#define MDMA0_S0_X_MODIFY 0xFFC00E54 /* MemDMA0 Stream 0 Source X Modify Register */
-#define MDMA0_S0_Y_COUNT 0xFFC00E58 /* MemDMA0 Stream 0 Source Y Count Register */
-#define MDMA0_S0_Y_MODIFY 0xFFC00E5C /* MemDMA0 Stream 0 Source Y Modify Register */
-#define MDMA0_S0_CURR_DESC_PTR 0xFFC00E60 /* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
-#define MDMA0_S0_CURR_ADDR 0xFFC00E64 /* MemDMA0 Stream 0 Source Current Address Register */
-#define MDMA0_S0_IRQ_STATUS 0xFFC00E68 /* MemDMA0 Stream 0 Source Interrupt/Status Register */
-#define MDMA0_S0_PERIPHERAL_MAP 0xFFC00E6C /* MemDMA0 Stream 0 Source Peripheral Map Register */
-#define MDMA0_S0_CURR_X_COUNT 0xFFC00E70 /* MemDMA0 Stream 0 Source Current X Count Register */
-#define MDMA0_S0_CURR_Y_COUNT 0xFFC00E78 /* MemDMA0 Stream 0 Source Current Y Count Register */
-
-#define MDMA0_D1_NEXT_DESC_PTR 0xFFC00E80 /* MemDMA0 Stream 1 Destination Next Descriptor Pointer Register */
-#define MDMA0_D1_START_ADDR 0xFFC00E84 /* MemDMA0 Stream 1 Destination Start Address Register */
-#define MDMA0_D1_CONFIG 0xFFC00E88 /* MemDMA0 Stream 1 Destination Configuration Register */
-#define MDMA0_D1_X_COUNT 0xFFC00E90 /* MemDMA0 Stream 1 Destination X Count Register */
-#define MDMA0_D1_X_MODIFY 0xFFC00E94 /* MemDMA0 Stream 1 Destination X Modify Register */
-#define MDMA0_D1_Y_COUNT 0xFFC00E98 /* MemDMA0 Stream 1 Destination Y Count Register */
-#define MDMA0_D1_Y_MODIFY 0xFFC00E9C /* MemDMA0 Stream 1 Destination Y Modify Register */
-#define MDMA0_D1_CURR_DESC_PTR 0xFFC00EA0 /* MemDMA0 Stream 1 Destination Current Descriptor Pointer Register */
-#define MDMA0_D1_CURR_ADDR 0xFFC00EA4 /* MemDMA0 Stream 1 Destination Current Address Register */
-#define MDMA0_D1_IRQ_STATUS 0xFFC00EA8 /* MemDMA0 Stream 1 Destination Interrupt/Status Register */
-#define MDMA0_D1_PERIPHERAL_MAP 0xFFC00EAC /* MemDMA0 Stream 1 Destination Peripheral Map Register */
-#define MDMA0_D1_CURR_X_COUNT 0xFFC00EB0 /* MemDMA0 Stream 1 Destination Current X Count Register */
-#define MDMA0_D1_CURR_Y_COUNT 0xFFC00EB8 /* MemDMA0 Stream 1 Destination Current Y Count Register */
-
-#define MDMA0_S1_NEXT_DESC_PTR 0xFFC00EC0 /* MemDMA0 Stream 1 Source Next Descriptor Pointer Register */
-#define MDMA0_S1_START_ADDR 0xFFC00EC4 /* MemDMA0 Stream 1 Source Start Address Register */
-#define MDMA0_S1_CONFIG 0xFFC00EC8 /* MemDMA0 Stream 1 Source Configuration Register */
-#define MDMA0_S1_X_COUNT 0xFFC00ED0 /* MemDMA0 Stream 1 Source X Count Register */
-#define MDMA0_S1_X_MODIFY 0xFFC00ED4 /* MemDMA0 Stream 1 Source X Modify Register */
-#define MDMA0_S1_Y_COUNT 0xFFC00ED8 /* MemDMA0 Stream 1 Source Y Count Register */
-#define MDMA0_S1_Y_MODIFY 0xFFC00EDC /* MemDMA0 Stream 1 Source Y Modify Register */
-#define MDMA0_S1_CURR_DESC_PTR 0xFFC00EE0 /* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
-#define MDMA0_S1_CURR_ADDR 0xFFC00EE4 /* MemDMA0 Stream 1 Source Current Address Register */
-#define MDMA0_S1_IRQ_STATUS 0xFFC00EE8 /* MemDMA0 Stream 1 Source Interrupt/Status Register */
-#define MDMA0_S1_PERIPHERAL_MAP 0xFFC00EEC /* MemDMA0 Stream 1 Source Peripheral Map Register */
-#define MDMA0_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */
-#define MDMA0_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA0_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA0_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA0_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA0_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA0_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA0_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA0_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA0_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA0_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA0_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA0_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA0_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA0_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA0_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA0_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA0_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA0_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA0_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA0_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA0_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA0_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA0_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA0_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA0_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA0_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA0_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA0_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA0_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA0_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA0_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA0_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA0_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA0_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA0_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA0_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA0_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA0_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA0_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA0_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA0_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA0_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA0_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA0_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA0_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA0_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA0_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA0_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA0_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA0_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA0_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA0_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA0_S1_CURR_Y_COUNT
-
-
-/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
-#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
-#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
-#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
-#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
-#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
-
-
-/* Two-Wire Interface 0 (0xFFC01400 - 0xFFC014FF) */
-#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
-#define TWI0_CONTROL 0xFFC01404 /* TWI0 Master Internal Time Reference Register */
-#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
-#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
-#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
-#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
-#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
-#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
-#define TWI0_INT_STAT 0xFFC01420 /* TWI0 Master Interrupt Register */
-#define TWI0_INT_MASK 0xFFC01424 /* TWI0 Master Interrupt Mask Register */
-#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
-#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
-#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
-#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
-#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
-#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
-
-#define TWI0_REGBASE TWI0_CLKDIV
-
-/* the following are for backwards compatibility */
-#define TWI0_PRESCALE TWI0_CONTROL
-#define TWI0_INT_SRC TWI0_INT_STAT
-#define TWI0_INT_ENABLE TWI0_INT_MASK
-
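/*
 * A hedged timing sketch, assuming the HRM field layout: CONTROL carries
 * a PRESCALE field of SCLK/10MHz (the enable bit is omitted here), and
 * CLKDIV packs the SCL low period in bits [7:0] and the high period in
 * bits [15:8], both in 10 MHz reference units.
 */
#define MMR16(a) (*(volatile unsigned short *)(a))

static void twi0_set_timing(unsigned long sclk_hz,
			    unsigned char lo, unsigned char hi)
{
	MMR16(TWI0_CONTROL) = sclk_hz / 10000000;             /* PRESCALE */
	MMR16(TWI0_CLKDIV)  = ((unsigned short)hi << 8) | lo;
}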
-
-/* General-Purpose Ports (0xFFC01500 - 0xFFC015FF) */
-
-/* GPIO Port C Register Names */
-#define PORTCIO_FER 0xFFC01500 /* GPIO Pin Port C Configuration Register */
-#define PORTCIO 0xFFC01510 /* GPIO Pin Port C Data Register */
-#define PORTCIO_CLEAR 0xFFC01520 /* Clear GPIO Pin Port C Register */
-#define PORTCIO_SET 0xFFC01530 /* Set GPIO Pin Port C Register */
-#define PORTCIO_TOGGLE 0xFFC01540 /* Toggle GPIO Pin Port C Register */
-#define PORTCIO_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
-#define PORTCIO_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
-
-/* GPIO Port D Register Names */
-#define PORTDIO_FER 0xFFC01504 /* GPIO Pin Port D Configuration Register */
-#define PORTDIO 0xFFC01514 /* GPIO Pin Port D Data Register */
-#define PORTDIO_CLEAR 0xFFC01524 /* Clear GPIO Pin Port D Register */
-#define PORTDIO_SET 0xFFC01534 /* Set GPIO Pin Port D Register */
-#define PORTDIO_TOGGLE 0xFFC01544 /* Toggle GPIO Pin Port D Register */
-#define PORTDIO_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
-#define PORTDIO_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
-
-/* GPIO Port E Register Names */
-#define PORTEIO_FER 0xFFC01508 /* GPIO Pin Port E Configuration Register */
-#define PORTEIO 0xFFC01518 /* GPIO Pin Port E Data Register */
-#define PORTEIO_CLEAR 0xFFC01528 /* Clear GPIO Pin Port E Register */
-#define PORTEIO_SET 0xFFC01538 /* Set GPIO Pin Port E Register */
-#define PORTEIO_TOGGLE 0xFFC01548 /* Toggle GPIO Pin Port E Register */
-#define PORTEIO_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
-#define PORTEIO_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
-
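/*
 * Ports C/D/E interleave at a 4-byte stride per port within each function
 * group, and the groups step by 0x10 bytes, so any register above can be
 * derived from PORTCIO_FER. The macro name is illustrative:
 */
#define PORTIO_REG(port, fn) (PORTCIO_FER + (port) * 0x4 + (fn) * 0x10)
/* port: 0=C, 1=D, 2=E; fn: 0=FER, 1=data, 2=CLEAR, 3=SET, 4=TOGGLE,
 * 5=DIR, 6=INEN. e.g. PORTIO_REG(1, 5) == PORTDIO_DIR == 0xFFC01554 */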
-/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
-
-#define DMAC1_TC_PER 0xFFC01B0C /* DMA Controller 1 Traffic Control Periods Register */
-#define DMAC1_TC_CNT 0xFFC01B10 /* DMA Controller 1 Traffic Control Current Counts Register */
-
-/* Alternate (deprecated) register names, provided for backwards compatibility */
-#define DMA1_TCPER DMAC1_TC_PER
-#define DMA1_TCCNT DMAC1_TC_CNT
-
-
-/* DMA Controller 1 (0xFFC01C00 - 0xFFC01FFF) */
-#define DMA8_NEXT_DESC_PTR 0xFFC01C00 /* DMA Channel 8 Next Descriptor Pointer Register */
-#define DMA8_START_ADDR 0xFFC01C04 /* DMA Channel 8 Start Address Register */
-#define DMA8_CONFIG 0xFFC01C08 /* DMA Channel 8 Configuration Register */
-#define DMA8_X_COUNT 0xFFC01C10 /* DMA Channel 8 X Count Register */
-#define DMA8_X_MODIFY 0xFFC01C14 /* DMA Channel 8 X Modify Register */
-#define DMA8_Y_COUNT 0xFFC01C18 /* DMA Channel 8 Y Count Register */
-#define DMA8_Y_MODIFY 0xFFC01C1C /* DMA Channel 8 Y Modify Register */
-#define DMA8_CURR_DESC_PTR 0xFFC01C20 /* DMA Channel 8 Current Descriptor Pointer Register */
-#define DMA8_CURR_ADDR 0xFFC01C24 /* DMA Channel 8 Current Address Register */
-#define DMA8_IRQ_STATUS 0xFFC01C28 /* DMA Channel 8 Interrupt/Status Register */
-#define DMA8_PERIPHERAL_MAP 0xFFC01C2C /* DMA Channel 8 Peripheral Map Register */
-#define DMA8_CURR_X_COUNT 0xFFC01C30 /* DMA Channel 8 Current X Count Register */
-#define DMA8_CURR_Y_COUNT 0xFFC01C38 /* DMA Channel 8 Current Y Count Register */
-
-#define DMA9_NEXT_DESC_PTR 0xFFC01C40 /* DMA Channel 9 Next Descriptor Pointer Register */
-#define DMA9_START_ADDR 0xFFC01C44 /* DMA Channel 9 Start Address Register */
-#define DMA9_CONFIG 0xFFC01C48 /* DMA Channel 9 Configuration Register */
-#define DMA9_X_COUNT 0xFFC01C50 /* DMA Channel 9 X Count Register */
-#define DMA9_X_MODIFY 0xFFC01C54 /* DMA Channel 9 X Modify Register */
-#define DMA9_Y_COUNT 0xFFC01C58 /* DMA Channel 9 Y Count Register */
-#define DMA9_Y_MODIFY 0xFFC01C5C /* DMA Channel 9 Y Modify Register */
-#define DMA9_CURR_DESC_PTR 0xFFC01C60 /* DMA Channel 9 Current Descriptor Pointer Register */
-#define DMA9_CURR_ADDR 0xFFC01C64 /* DMA Channel 9 Current Address Register */
-#define DMA9_IRQ_STATUS 0xFFC01C68 /* DMA Channel 9 Interrupt/Status Register */
-#define DMA9_PERIPHERAL_MAP 0xFFC01C6C /* DMA Channel 9 Peripheral Map Register */
-#define DMA9_CURR_X_COUNT 0xFFC01C70 /* DMA Channel 9 Current X Count Register */
-#define DMA9_CURR_Y_COUNT 0xFFC01C78 /* DMA Channel 9 Current Y Count Register */
-
-#define DMA10_NEXT_DESC_PTR 0xFFC01C80 /* DMA Channel 10 Next Descriptor Pointer Register */
-#define DMA10_START_ADDR 0xFFC01C84 /* DMA Channel 10 Start Address Register */
-#define DMA10_CONFIG 0xFFC01C88 /* DMA Channel 10 Configuration Register */
-#define DMA10_X_COUNT 0xFFC01C90 /* DMA Channel 10 X Count Register */
-#define DMA10_X_MODIFY 0xFFC01C94 /* DMA Channel 10 X Modify Register */
-#define DMA10_Y_COUNT 0xFFC01C98 /* DMA Channel 10 Y Count Register */
-#define DMA10_Y_MODIFY 0xFFC01C9C /* DMA Channel 10 Y Modify Register */
-#define DMA10_CURR_DESC_PTR 0xFFC01CA0 /* DMA Channel 10 Current Descriptor Pointer Register */
-#define DMA10_CURR_ADDR 0xFFC01CA4 /* DMA Channel 10 Current Address Register */
-#define DMA10_IRQ_STATUS 0xFFC01CA8 /* DMA Channel 10 Interrupt/Status Register */
-#define DMA10_PERIPHERAL_MAP 0xFFC01CAC /* DMA Channel 10 Peripheral Map Register */
-#define DMA10_CURR_X_COUNT 0xFFC01CB0 /* DMA Channel 10 Current X Count Register */
-#define DMA10_CURR_Y_COUNT 0xFFC01CB8 /* DMA Channel 10 Current Y Count Register */
-
-#define DMA11_NEXT_DESC_PTR 0xFFC01CC0 /* DMA Channel 11 Next Descriptor Pointer Register */
-#define DMA11_START_ADDR 0xFFC01CC4 /* DMA Channel 11 Start Address Register */
-#define DMA11_CONFIG 0xFFC01CC8 /* DMA Channel 11 Configuration Register */
-#define DMA11_X_COUNT 0xFFC01CD0 /* DMA Channel 11 X Count Register */
-#define DMA11_X_MODIFY 0xFFC01CD4 /* DMA Channel 11 X Modify Register */
-#define DMA11_Y_COUNT 0xFFC01CD8 /* DMA Channel 11 Y Count Register */
-#define DMA11_Y_MODIFY 0xFFC01CDC /* DMA Channel 11 Y Modify Register */
-#define DMA11_CURR_DESC_PTR 0xFFC01CE0 /* DMA Channel 11 Current Descriptor Pointer Register */
-#define DMA11_CURR_ADDR 0xFFC01CE4 /* DMA Channel 11 Current Address Register */
-#define DMA11_IRQ_STATUS 0xFFC01CE8 /* DMA Channel 11 Interrupt/Status Register */
-#define DMA11_PERIPHERAL_MAP 0xFFC01CEC /* DMA Channel 11 Peripheral Map Register */
-#define DMA11_CURR_X_COUNT 0xFFC01CF0 /* DMA Channel 11 Current X Count Register */
-#define DMA11_CURR_Y_COUNT 0xFFC01CF8 /* DMA Channel 11 Current Y Count Register */
-
-#define DMA12_NEXT_DESC_PTR 0xFFC01D00 /* DMA Channel 12 Next Descriptor Pointer Register */
-#define DMA12_START_ADDR 0xFFC01D04 /* DMA Channel 12 Start Address Register */
-#define DMA12_CONFIG 0xFFC01D08 /* DMA Channel 12 Configuration Register */
-#define DMA12_X_COUNT 0xFFC01D10 /* DMA Channel 12 X Count Register */
-#define DMA12_X_MODIFY 0xFFC01D14 /* DMA Channel 12 X Modify Register */
-#define DMA12_Y_COUNT 0xFFC01D18 /* DMA Channel 12 Y Count Register */
-#define DMA12_Y_MODIFY 0xFFC01D1C /* DMA Channel 12 Y Modify Register */
-#define DMA12_CURR_DESC_PTR 0xFFC01D20 /* DMA Channel 12 Current Descriptor Pointer Register */
-#define DMA12_CURR_ADDR 0xFFC01D24 /* DMA Channel 12 Current Address Register */
-#define DMA12_IRQ_STATUS 0xFFC01D28 /* DMA Channel 12 Interrupt/Status Register */
-#define DMA12_PERIPHERAL_MAP 0xFFC01D2C /* DMA Channel 12 Peripheral Map Register */
-#define DMA12_CURR_X_COUNT 0xFFC01D30 /* DMA Channel 12 Current X Count Register */
-#define DMA12_CURR_Y_COUNT 0xFFC01D38 /* DMA Channel 12 Current Y Count Register */
-
-#define DMA13_NEXT_DESC_PTR 0xFFC01D40 /* DMA Channel 13 Next Descriptor Pointer Register */
-#define DMA13_START_ADDR 0xFFC01D44 /* DMA Channel 13 Start Address Register */
-#define DMA13_CONFIG 0xFFC01D48 /* DMA Channel 13 Configuration Register */
-#define DMA13_X_COUNT 0xFFC01D50 /* DMA Channel 13 X Count Register */
-#define DMA13_X_MODIFY 0xFFC01D54 /* DMA Channel 13 X Modify Register */
-#define DMA13_Y_COUNT 0xFFC01D58 /* DMA Channel 13 Y Count Register */
-#define DMA13_Y_MODIFY 0xFFC01D5C /* DMA Channel 13 Y Modify Register */
-#define DMA13_CURR_DESC_PTR 0xFFC01D60 /* DMA Channel 13 Current Descriptor Pointer Register */
-#define DMA13_CURR_ADDR 0xFFC01D64 /* DMA Channel 13 Current Address Register */
-#define DMA13_IRQ_STATUS 0xFFC01D68 /* DMA Channel 13 Interrupt/Status Register */
-#define DMA13_PERIPHERAL_MAP 0xFFC01D6C /* DMA Channel 13 Peripheral Map Register */
-#define DMA13_CURR_X_COUNT 0xFFC01D70 /* DMA Channel 13 Current X Count Register */
-#define DMA13_CURR_Y_COUNT 0xFFC01D78 /* DMA Channel 13 Current Y Count Register */
-
-#define DMA14_NEXT_DESC_PTR 0xFFC01D80 /* DMA Channel 14 Next Descriptor Pointer Register */
-#define DMA14_START_ADDR 0xFFC01D84 /* DMA Channel 14 Start Address Register */
-#define DMA14_CONFIG 0xFFC01D88 /* DMA Channel 14 Configuration Register */
-#define DMA14_X_COUNT 0xFFC01D90 /* DMA Channel 14 X Count Register */
-#define DMA14_X_MODIFY 0xFFC01D94 /* DMA Channel 14 X Modify Register */
-#define DMA14_Y_COUNT 0xFFC01D98 /* DMA Channel 14 Y Count Register */
-#define DMA14_Y_MODIFY 0xFFC01D9C /* DMA Channel 14 Y Modify Register */
-#define DMA14_CURR_DESC_PTR 0xFFC01DA0 /* DMA Channel 14 Current Descriptor Pointer Register */
-#define DMA14_CURR_ADDR 0xFFC01DA4 /* DMA Channel 14 Current Address Register */
-#define DMA14_IRQ_STATUS 0xFFC01DA8 /* DMA Channel 14 Interrupt/Status Register */
-#define DMA14_PERIPHERAL_MAP 0xFFC01DAC /* DMA Channel 14 Peripheral Map Register */
-#define DMA14_CURR_X_COUNT 0xFFC01DB0 /* DMA Channel 14 Current X Count Register */
-#define DMA14_CURR_Y_COUNT 0xFFC01DB8 /* DMA Channel 14 Current Y Count Register */
-
-#define DMA15_NEXT_DESC_PTR 0xFFC01DC0 /* DMA Channel 15 Next Descriptor Pointer Register */
-#define DMA15_START_ADDR 0xFFC01DC4 /* DMA Channel 15 Start Address Register */
-#define DMA15_CONFIG 0xFFC01DC8 /* DMA Channel 15 Configuration Register */
-#define DMA15_X_COUNT 0xFFC01DD0 /* DMA Channel 15 X Count Register */
-#define DMA15_X_MODIFY 0xFFC01DD4 /* DMA Channel 15 X Modify Register */
-#define DMA15_Y_COUNT 0xFFC01DD8 /* DMA Channel 15 Y Count Register */
-#define DMA15_Y_MODIFY 0xFFC01DDC /* DMA Channel 15 Y Modify Register */
-#define DMA15_CURR_DESC_PTR 0xFFC01DE0 /* DMA Channel 15 Current Descriptor Pointer Register */
-#define DMA15_CURR_ADDR 0xFFC01DE4 /* DMA Channel 15 Current Address Register */
-#define DMA15_IRQ_STATUS 0xFFC01DE8 /* DMA Channel 15 Interrupt/Status Register */
-#define DMA15_PERIPHERAL_MAP 0xFFC01DEC /* DMA Channel 15 Peripheral Map Register */
-#define DMA15_CURR_X_COUNT 0xFFC01DF0 /* DMA Channel 15 Current X Count Register */
-#define DMA15_CURR_Y_COUNT 0xFFC01DF8 /* DMA Channel 15 Current Y Count Register */
-
-#define DMA16_NEXT_DESC_PTR 0xFFC01E00 /* DMA Channel 16 Next Descriptor Pointer Register */
-#define DMA16_START_ADDR 0xFFC01E04 /* DMA Channel 16 Start Address Register */
-#define DMA16_CONFIG 0xFFC01E08 /* DMA Channel 16 Configuration Register */
-#define DMA16_X_COUNT 0xFFC01E10 /* DMA Channel 16 X Count Register */
-#define DMA16_X_MODIFY 0xFFC01E14 /* DMA Channel 16 X Modify Register */
-#define DMA16_Y_COUNT 0xFFC01E18 /* DMA Channel 16 Y Count Register */
-#define DMA16_Y_MODIFY 0xFFC01E1C /* DMA Channel 16 Y Modify Register */
-#define DMA16_CURR_DESC_PTR 0xFFC01E20 /* DMA Channel 16 Current Descriptor Pointer Register */
-#define DMA16_CURR_ADDR 0xFFC01E24 /* DMA Channel 16 Current Address Register */
-#define DMA16_IRQ_STATUS 0xFFC01E28 /* DMA Channel 16 Interrupt/Status Register */
-#define DMA16_PERIPHERAL_MAP 0xFFC01E2C /* DMA Channel 16 Peripheral Map Register */
-#define DMA16_CURR_X_COUNT 0xFFC01E30 /* DMA Channel 16 Current X Count Register */
-#define DMA16_CURR_Y_COUNT 0xFFC01E38 /* DMA Channel 16 Current Y Count Register */
-
-#define DMA17_NEXT_DESC_PTR 0xFFC01E40 /* DMA Channel 17 Next Descriptor Pointer Register */
-#define DMA17_START_ADDR 0xFFC01E44 /* DMA Channel 17 Start Address Register */
-#define DMA17_CONFIG 0xFFC01E48 /* DMA Channel 17 Configuration Register */
-#define DMA17_X_COUNT 0xFFC01E50 /* DMA Channel 17 X Count Register */
-#define DMA17_X_MODIFY 0xFFC01E54 /* DMA Channel 17 X Modify Register */
-#define DMA17_Y_COUNT 0xFFC01E58 /* DMA Channel 17 Y Count Register */
-#define DMA17_Y_MODIFY 0xFFC01E5C /* DMA Channel 17 Y Modify Register */
-#define DMA17_CURR_DESC_PTR 0xFFC01E60 /* DMA Channel 17 Current Descriptor Pointer Register */
-#define DMA17_CURR_ADDR 0xFFC01E64 /* DMA Channel 17 Current Address Register */
-#define DMA17_IRQ_STATUS 0xFFC01E68 /* DMA Channel 17 Interrupt/Status Register */
-#define DMA17_PERIPHERAL_MAP 0xFFC01E6C /* DMA Channel 17 Peripheral Map Register */
-#define DMA17_CURR_X_COUNT 0xFFC01E70 /* DMA Channel 17 Current X Count Register */
-#define DMA17_CURR_Y_COUNT 0xFFC01E78 /* DMA Channel 17 Current Y Count Register */
-
-#define DMA18_NEXT_DESC_PTR 0xFFC01E80 /* DMA Channel 18 Next Descriptor Pointer Register */
-#define DMA18_START_ADDR 0xFFC01E84 /* DMA Channel 18 Start Address Register */
-#define DMA18_CONFIG 0xFFC01E88 /* DMA Channel 18 Configuration Register */
-#define DMA18_X_COUNT 0xFFC01E90 /* DMA Channel 18 X Count Register */
-#define DMA18_X_MODIFY 0xFFC01E94 /* DMA Channel 18 X Modify Register */
-#define DMA18_Y_COUNT 0xFFC01E98 /* DMA Channel 18 Y Count Register */
-#define DMA18_Y_MODIFY 0xFFC01E9C /* DMA Channel 18 Y Modify Register */
-#define DMA18_CURR_DESC_PTR 0xFFC01EA0 /* DMA Channel 18 Current Descriptor Pointer Register */
-#define DMA18_CURR_ADDR 0xFFC01EA4 /* DMA Channel 18 Current Address Register */
-#define DMA18_IRQ_STATUS 0xFFC01EA8 /* DMA Channel 18 Interrupt/Status Register */
-#define DMA18_PERIPHERAL_MAP 0xFFC01EAC /* DMA Channel 18 Peripheral Map Register */
-#define DMA18_CURR_X_COUNT 0xFFC01EB0 /* DMA Channel 18 Current X Count Register */
-#define DMA18_CURR_Y_COUNT 0xFFC01EB8 /* DMA Channel 18 Current Y Count Register */
-
-#define DMA19_NEXT_DESC_PTR 0xFFC01EC0 /* DMA Channel 19 Next Descriptor Pointer Register */
-#define DMA19_START_ADDR 0xFFC01EC4 /* DMA Channel 19 Start Address Register */
-#define DMA19_CONFIG 0xFFC01EC8 /* DMA Channel 19 Configuration Register */
-#define DMA19_X_COUNT 0xFFC01ED0 /* DMA Channel 19 X Count Register */
-#define DMA19_X_MODIFY 0xFFC01ED4 /* DMA Channel 19 X Modify Register */
-#define DMA19_Y_COUNT 0xFFC01ED8 /* DMA Channel 19 Y Count Register */
-#define DMA19_Y_MODIFY 0xFFC01EDC /* DMA Channel 19 Y Modify Register */
-#define DMA19_CURR_DESC_PTR 0xFFC01EE0 /* DMA Channel 19 Current Descriptor Pointer Register */
-#define DMA19_CURR_ADDR 0xFFC01EE4 /* DMA Channel 19 Current Address Register */
-#define DMA19_IRQ_STATUS 0xFFC01EE8 /* DMA Channel 19 Interrupt/Status Register */
-#define DMA19_PERIPHERAL_MAP 0xFFC01EEC /* DMA Channel 19 Peripheral Map Register */
-#define DMA19_CURR_X_COUNT 0xFFC01EF0 /* DMA Channel 19 Current X Count Register */
-#define DMA19_CURR_Y_COUNT 0xFFC01EF8 /* DMA Channel 19 Current Y Count Register */
-
-#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00 /* MemDMA1 Stream 0 Destination Next Descriptor Pointer Register */
-#define MDMA1_D0_START_ADDR 0xFFC01F04 /* MemDMA1 Stream 0 Destination Start Address Register */
-#define MDMA1_D0_CONFIG 0xFFC01F08 /* MemDMA1 Stream 0 Destination Configuration Register */
-#define MDMA1_D0_X_COUNT 0xFFC01F10 /* MemDMA1 Stream 0 Destination X Count Register */
-#define MDMA1_D0_X_MODIFY 0xFFC01F14 /* MemDMA1 Stream 0 Destination X Modify Register */
-#define MDMA1_D0_Y_COUNT 0xFFC01F18 /* MemDMA1 Stream 0 Destination Y Count Register */
-#define MDMA1_D0_Y_MODIFY 0xFFC01F1C /* MemDMA1 Stream 0 Destination Y Modify Register */
-#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20 /* MemDMA1 Stream 0 Destination Current Descriptor Pointer Register */
-#define MDMA1_D0_CURR_ADDR 0xFFC01F24 /* MemDMA1 Stream 0 Destination Current Address Register */
-#define MDMA1_D0_IRQ_STATUS 0xFFC01F28 /* MemDMA1 Stream 0 Destination Interrupt/Status Register */
-#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C /* MemDMA1 Stream 0 Destination Peripheral Map Register */
-#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30 /* MemDMA1 Stream 0 Destination Current X Count Register */
-#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38 /* MemDMA1 Stream 0 Destination Current Y Count Register */
-
-#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40 /* MemDMA1 Stream 0 Source Next Descriptor Pointer Register */
-#define MDMA1_S0_START_ADDR 0xFFC01F44 /* MemDMA1 Stream 0 Source Start Address Register */
-#define MDMA1_S0_CONFIG 0xFFC01F48 /* MemDMA1 Stream 0 Source Configuration Register */
-#define MDMA1_S0_X_COUNT 0xFFC01F50 /* MemDMA1 Stream 0 Source X Count Register */
-#define MDMA1_S0_X_MODIFY 0xFFC01F54 /* MemDMA1 Stream 0 Source X Modify Register */
-#define MDMA1_S0_Y_COUNT 0xFFC01F58 /* MemDMA1 Stream 0 Source Y Count Register */
-#define MDMA1_S0_Y_MODIFY 0xFFC01F5C /* MemDMA1 Stream 0 Source Y Modify Register */
-#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60 /* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
-#define MDMA1_S0_CURR_ADDR 0xFFC01F64 /* MemDMA1 Stream 0 Source Current Address Register */
-#define MDMA1_S0_IRQ_STATUS 0xFFC01F68 /* MemDMA1 Stream 0 Source Interrupt/Status Register */
-#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C /* MemDMA1 Stream 0 Source Peripheral Map Register */
-#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70 /* MemDMA1 Stream 0 Source Current X Count Register */
-#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78 /* MemDMA1 Stream 0 Source Current Y Count Register */
-
-#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80 /* MemDMA1 Stream 1 Destination Next Descriptor Pointer Register */
-#define MDMA1_D1_START_ADDR 0xFFC01F84 /* MemDMA1 Stream 1 Destination Start Address Register */
-#define MDMA1_D1_CONFIG 0xFFC01F88 /* MemDMA1 Stream 1 Destination Configuration Register */
-#define MDMA1_D1_X_COUNT 0xFFC01F90 /* MemDMA1 Stream 1 Destination X Count Register */
-#define MDMA1_D1_X_MODIFY 0xFFC01F94 /* MemDMA1 Stream 1 Destination X Modify Register */
-#define MDMA1_D1_Y_COUNT 0xFFC01F98 /* MemDMA1 Stream 1 Destination Y Count Register */
-#define MDMA1_D1_Y_MODIFY 0xFFC01F9C /* MemDMA1 Stream 1 Destination Y Modify Register */
-#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0 /* MemDMA1 Stream 1 Destination Current Descriptor Pointer Register */
-#define MDMA1_D1_CURR_ADDR 0xFFC01FA4 /* MemDMA1 Stream 1 Destination Current Address Register */
-#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8 /* MemDMA1 Stream 1 Destination Interrupt/Status Register */
-#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC /* MemDMA1 Stream 1 Destination Peripheral Map Register */
-#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0 /* MemDMA1 Stream 1 Destination Current X Count Register */
-#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8 /* MemDMA1 Stream 1 Destination Current Y Count Register */
-
-#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0 /* MemDMA1 Stream 1 Source Next Descriptor Pointer Register */
-#define MDMA1_S1_START_ADDR 0xFFC01FC4 /* MemDMA1 Stream 1 Source Start Address Register */
-#define MDMA1_S1_CONFIG 0xFFC01FC8 /* MemDMA1 Stream 1 Source Configuration Register */
-#define MDMA1_S1_X_COUNT 0xFFC01FD0 /* MemDMA1 Stream 1 Source X Count Register */
-#define MDMA1_S1_X_MODIFY 0xFFC01FD4 /* MemDMA1 Stream 1 Source X Modify Register */
-#define MDMA1_S1_Y_COUNT 0xFFC01FD8 /* MemDMA1 Stream 1 Source Y Count Register */
-#define MDMA1_S1_Y_MODIFY 0xFFC01FDC /* MemDMA1 Stream 1 Source Y Modify Register */
-#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0 /* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
-#define MDMA1_S1_CURR_ADDR 0xFFC01FE4 /* MemDMA1 Stream 1 Source Current Address Register */
-#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8 /* MemDMA1 Stream 1 Source Interrupt/Status Register */
-#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC /* MemDMA1 Stream 1 Source Peripheral Map Register */
-#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0 /* MemDMA1 Stream 1 Source Current X Count Register */
-#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8 /* MemDMA1 Stream 1 Source Current Y Count Register */
-
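/*
 * Each source/destination pair above forms one memory-to-memory stream.
 * Below is a 1-D stop-mode copy sketch for MemDMA1 stream 0; the CONFIG
 * bit names are placeholders for this sketch (the real encodings come
 * from the DMA CONFIG bit definitions, not this address list).
 */
#define MMR16(a) (*(volatile unsigned short *)(a))
#define MMR32(a) (*(volatile unsigned long *)(a))
#define X_DMAEN     0x0001   /* assumed: channel enable */
#define X_WNR       0x0002   /* assumed: write (destination) direction */
#define X_WDSIZE_16 0x0004   /* assumed: 16-bit transfer width */

static void mdma1_copy16(void *dst, const void *src, unsigned short nwords)
{
	MMR32(MDMA1_S0_START_ADDR) = (unsigned long)src;
	MMR16(MDMA1_S0_X_COUNT)    = nwords;
	MMR16(MDMA1_S0_X_MODIFY)   = 2;
	MMR32(MDMA1_D0_START_ADDR) = (unsigned long)dst;
	MMR16(MDMA1_D0_X_COUNT)    = nwords;
	MMR16(MDMA1_D0_X_MODIFY)   = 2;
	MMR16(MDMA1_S0_CONFIG)     = X_DMAEN | X_WDSIZE_16;
	MMR16(MDMA1_D0_CONFIG)     = X_DMAEN | X_WNR | X_WDSIZE_16;
	/* completion would be polled via MDMA1_D0_IRQ_STATUS */
}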
-
-/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
-#define UART1_THR 0xFFC02000 /* Transmit Holding register */
-#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
-#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
-#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
-#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
-#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
-#define UART1_LCR 0xFFC0200C /* Line Control Register */
-#define UART1_MCR 0xFFC02010 /* Modem Control Register */
-#define UART1_LSR 0xFFC02014 /* Line Status Register */
-#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
-#define UART1_GCTL 0xFFC02024 /* Global Control Register */
-
-
-/* UART2 Controller (0xFFC02100 - 0xFFC021FF) */
-#define UART2_THR 0xFFC02100 /* Transmit Holding register */
-#define UART2_RBR 0xFFC02100 /* Receive Buffer register */
-#define UART2_DLL 0xFFC02100 /* Divisor Latch (Low-Byte) */
-#define UART2_IER 0xFFC02104 /* Interrupt Enable Register */
-#define UART2_DLH 0xFFC02104 /* Divisor Latch (High-Byte) */
-#define UART2_IIR 0xFFC02108 /* Interrupt Identification Register */
-#define UART2_LCR 0xFFC0210C /* Line Control Register */
-#define UART2_MCR 0xFFC02110 /* Modem Control Register */
-#define UART2_LSR 0xFFC02114 /* Line Status Register */
-#define UART2_SCR 0xFFC0211C /* SCR Scratch Register */
-#define UART2_GCTL 0xFFC02124 /* Global Control Register */
-
-
-/* Two-Wire Interface 1 (0xFFC02200 - 0xFFC022FF) */
-#define TWI1_CLKDIV 0xFFC02200 /* Serial Clock Divider Register */
-#define TWI1_CONTROL 0xFFC02204 /* TWI1 Master Internal Time Reference Register */
-#define TWI1_SLAVE_CTL 0xFFC02208 /* Slave Mode Control Register */
-#define TWI1_SLAVE_STAT 0xFFC0220C /* Slave Mode Status Register */
-#define TWI1_SLAVE_ADDR 0xFFC02210 /* Slave Mode Address Register */
-#define TWI1_MASTER_CTL 0xFFC02214 /* Master Mode Control Register */
-#define TWI1_MASTER_STAT 0xFFC02218 /* Master Mode Status Register */
-#define TWI1_MASTER_ADDR 0xFFC0221C /* Master Mode Address Register */
-#define TWI1_INT_STAT 0xFFC02220 /* TWI1 Master Interrupt Register */
-#define TWI1_INT_MASK 0xFFC02224 /* TWI1 Master Interrupt Mask Register */
-#define TWI1_FIFO_CTL 0xFFC02228 /* FIFO Control Register */
-#define TWI1_FIFO_STAT 0xFFC0222C /* FIFO Status Register */
-#define TWI1_XMT_DATA8 0xFFC02280 /* FIFO Transmit Data Single Byte Register */
-#define TWI1_XMT_DATA16 0xFFC02284 /* FIFO Transmit Data Double Byte Register */
-#define TWI1_RCV_DATA8 0xFFC02288 /* FIFO Receive Data Single Byte Register */
-#define TWI1_RCV_DATA16 0xFFC0228C /* FIFO Receive Data Double Byte Register */
-#define TWI1_REGBASE TWI1_CLKDIV
-
-
-/* the following are for backwards compatibility */
-#define TWI1_PRESCALE TWI1_CONTROL
-#define TWI1_INT_SRC TWI1_INT_STAT
-#define TWI1_INT_ENABLE TWI1_INT_MASK
-
-
-/* SPI1 Controller (0xFFC02300 - 0xFFC023FF) */
-#define SPI1_CTL 0xFFC02300 /* SPI1 Control Register */
-#define SPI1_FLG 0xFFC02304 /* SPI1 Flag register */
-#define SPI1_STAT 0xFFC02308 /* SPI1 Status register */
-#define SPI1_TDBR 0xFFC0230C /* SPI1 Transmit Data Buffer Register */
-#define SPI1_RDBR 0xFFC02310 /* SPI1 Receive Data Buffer Register */
-#define SPI1_BAUD 0xFFC02314 /* SPI1 Baud rate Register */
-#define SPI1_SHADOW 0xFFC02318 /* SPI1_RDBR Shadow Register */
-#define SPI1_REGBASE SPI1_CTL
-
-/* SPI2 Controller (0xFFC02400 - 0xFFC024FF) */
-#define SPI2_CTL 0xFFC02400 /* SPI2 Control Register */
-#define SPI2_FLG 0xFFC02404 /* SPI2 Flag register */
-#define SPI2_STAT 0xFFC02408 /* SPI2 Status register */
-#define SPI2_TDBR 0xFFC0240C /* SPI2 Transmit Data Buffer Register */
-#define SPI2_RDBR 0xFFC02410 /* SPI2 Receive Data Buffer Register */
-#define SPI2_BAUD 0xFFC02414 /* SPI2 Baud rate Register */
-#define SPI2_SHADOW 0xFFC02418 /* SPI2_RDBR Shadow Register */
-#define SPI2_REGBASE SPI2_CTL
-
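/*
 * For all three SPI ports the bit rate is assumed to follow the usual
 * Blackfin relation SCK = SCLK / (2 * SPIx_BAUD), with 2 as the smallest
 * legal divisor. A helper sketch:
 */
static unsigned short spi_baud_for(unsigned long sclk_hz, unsigned long sck_hz)
{
	unsigned long baud = sclk_hz / (2 * sck_hz);

	return baud < 2 ? 2 : (unsigned short)baud;
}
/* e.g. spi_baud_for(100000000, 1000000) == 50 for SPI1_BAUD or SPI2_BAUD */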
-/* SPORT2 Controller (0xFFC02500 - 0xFFC025FF) */
-#define SPORT2_TCR1 0xFFC02500 /* SPORT2 Transmit Configuration 1 Register */
-#define SPORT2_TCR2 0xFFC02504 /* SPORT2 Transmit Configuration 2 Register */
-#define SPORT2_TCLKDIV 0xFFC02508 /* SPORT2 Transmit Clock Divider */
-#define SPORT2_TFSDIV 0xFFC0250C /* SPORT2 Transmit Frame Sync Divider */
-#define SPORT2_TX 0xFFC02510 /* SPORT2 TX Data Register */
-#define SPORT2_RX 0xFFC02518 /* SPORT2 RX Data Register */
-#define SPORT2_RCR1 0xFFC02520 /* SPORT2 Receive Configuration 1 Register */
-#define SPORT2_RCR2 0xFFC02524 /* SPORT2 Receive Configuration 2 Register */
-#define SPORT2_RCLKDIV 0xFFC02528 /* SPORT2 Receive Clock Divider */
-#define SPORT2_RFSDIV 0xFFC0252C /* SPORT2 Receive Frame Sync Divider */
-#define SPORT2_STAT 0xFFC02530 /* SPORT2 Status Register */
-#define SPORT2_CHNL 0xFFC02534 /* SPORT2 Current Channel Register */
-#define SPORT2_MCMC1 0xFFC02538 /* SPORT2 Multi-Channel Configuration Register 1 */
-#define SPORT2_MCMC2 0xFFC0253C /* SPORT2 Multi-Channel Configuration Register 2 */
-#define SPORT2_MTCS0 0xFFC02540 /* SPORT2 Multi-Channel Transmit Select Register 0 */
-#define SPORT2_MTCS1 0xFFC02544 /* SPORT2 Multi-Channel Transmit Select Register 1 */
-#define SPORT2_MTCS2 0xFFC02548 /* SPORT2 Multi-Channel Transmit Select Register 2 */
-#define SPORT2_MTCS3 0xFFC0254C /* SPORT2 Multi-Channel Transmit Select Register 3 */
-#define SPORT2_MRCS0 0xFFC02550 /* SPORT2 Multi-Channel Receive Select Register 0 */
-#define SPORT2_MRCS1 0xFFC02554 /* SPORT2 Multi-Channel Receive Select Register 1 */
-#define SPORT2_MRCS2 0xFFC02558 /* SPORT2 Multi-Channel Receive Select Register 2 */
-#define SPORT2_MRCS3 0xFFC0255C /* SPORT2 Multi-Channel Receive Select Register 3 */
-
-
-/* SPORT3 Controller (0xFFC02600 - 0xFFC026FF) */
-#define SPORT3_TCR1 0xFFC02600 /* SPORT3 Transmit Configuration 1 Register */
-#define SPORT3_TCR2 0xFFC02604 /* SPORT3 Transmit Configuration 2 Register */
-#define SPORT3_TCLKDIV 0xFFC02608 /* SPORT3 Transmit Clock Divider */
-#define SPORT3_TFSDIV 0xFFC0260C /* SPORT3 Transmit Frame Sync Divider */
-#define SPORT3_TX 0xFFC02610 /* SPORT3 TX Data Register */
-#define SPORT3_RX 0xFFC02618 /* SPORT3 RX Data Register */
-#define SPORT3_RCR1 0xFFC02620 /* SPORT3 Receive Configuration 1 Register */
-#define SPORT3_RCR2 0xFFC02624 /* SPORT3 Receive Configuration 2 Register */
-#define SPORT3_RCLKDIV 0xFFC02628 /* SPORT3 Receive Clock Divider */
-#define SPORT3_RFSDIV 0xFFC0262C /* SPORT3 Receive Frame Sync Divider */
-#define SPORT3_STAT 0xFFC02630 /* SPORT3 Status Register */
-#define SPORT3_CHNL 0xFFC02634 /* SPORT3 Current Channel Register */
-#define SPORT3_MCMC1 0xFFC02638 /* SPORT3 Multi-Channel Configuration Register 1 */
-#define SPORT3_MCMC2 0xFFC0263C /* SPORT3 Multi-Channel Configuration Register 2 */
-#define SPORT3_MTCS0 0xFFC02640 /* SPORT3 Multi-Channel Transmit Select Register 0 */
-#define SPORT3_MTCS1 0xFFC02644 /* SPORT3 Multi-Channel Transmit Select Register 1 */
-#define SPORT3_MTCS2 0xFFC02648 /* SPORT3 Multi-Channel Transmit Select Register 2 */
-#define SPORT3_MTCS3 0xFFC0264C /* SPORT3 Multi-Channel Transmit Select Register 3 */
-#define SPORT3_MRCS0 0xFFC02650 /* SPORT3 Multi-Channel Receive Select Register 0 */
-#define SPORT3_MRCS1 0xFFC02654 /* SPORT3 Multi-Channel Receive Select Register 1 */
-#define SPORT3_MRCS2 0xFFC02658 /* SPORT3 Multi-Channel Receive Select Register 2 */
-#define SPORT3_MRCS3 0xFFC0265C /* SPORT3 Multi-Channel Receive Select Register 3 */
-
+#include "defBF538.h"
/* Media Transceiver (MXVR) (0xFFC02700 - 0xFFC028FF) */
@@ -995,1249 +149,4 @@
#define MXVR_BLOCK_CNT 0xFFC028C0 /* MXVR Block Counter */
#define MXVR_PLL_CTL_2 0xFFC028C4 /* MXVR Phase Lock Loop Control Register 2 */
-
-/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */
-/* For Mailboxes 0-15 */
-#define CAN_MC1 0xFFC02A00 /* Mailbox config reg 1 */
-#define CAN_MD1 0xFFC02A04 /* Mailbox direction reg 1 */
-#define CAN_TRS1 0xFFC02A08 /* Transmit Request Set reg 1 */
-#define CAN_TRR1 0xFFC02A0C /* Transmit Request Reset reg 1 */
-#define CAN_TA1 0xFFC02A10 /* Transmit Acknowledge reg 1 */
-#define CAN_AA1 0xFFC02A14 /* Transmit Abort Acknowledge reg 1 */
-#define CAN_RMP1 0xFFC02A18 /* Receive Message Pending reg 1 */
-#define CAN_RML1 0xFFC02A1C /* Receive Message Lost reg 1 */
-#define CAN_MBTIF1 0xFFC02A20 /* Mailbox Transmit Interrupt Flag reg 1 */
-#define CAN_MBRIF1 0xFFC02A24 /* Mailbox Receive Interrupt Flag reg 1 */
-#define CAN_MBIM1 0xFFC02A28 /* Mailbox Interrupt Mask reg 1 */
-#define CAN_RFH1 0xFFC02A2C /* Remote Frame Handling reg 1 */
-#define CAN_OPSS1 0xFFC02A30 /* Overwrite Protection Single-Shot Transmission reg 1 */
-
-/* For Mailboxes 16-31 */
-#define CAN_MC2 0xFFC02A40 /* Mailbox config reg 2 */
-#define CAN_MD2 0xFFC02A44 /* Mailbox direction reg 2 */
-#define CAN_TRS2 0xFFC02A48 /* Transmit Request Set reg 2 */
-#define CAN_TRR2 0xFFC02A4C /* Transmit Request Reset reg 2 */
-#define CAN_TA2 0xFFC02A50 /* Transmit Acknowledge reg 2 */
-#define CAN_AA2 0xFFC02A54 /* Transmit Abort Acknowledge reg 2 */
-#define CAN_RMP2 0xFFC02A58 /* Receive Message Pending reg 2 */
-#define CAN_RML2 0xFFC02A5C /* Receive Message Lost reg 2 */
-#define CAN_MBTIF2 0xFFC02A60 /* Mailbox Transmit Interrupt Flag reg 2 */
-#define CAN_MBRIF2 0xFFC02A64 /* Mailbox Receive Interrupt Flag reg 2 */
-#define CAN_MBIM2 0xFFC02A68 /* Mailbox Interrupt Mask reg 2 */
-#define CAN_RFH2 0xFFC02A6C /* Remote Frame Handling reg 2 */
-#define CAN_OPSS2 0xFFC02A70 /* Overwrite Protection Single-Shot Transmission reg 2 */
-
-#define CAN_CLOCK 0xFFC02A80 /* Bit Timing Configuration register 0 */
-#define CAN_TIMING 0xFFC02A84 /* Bit Timing Configuration register 1 */
-
-#define CAN_DEBUG 0xFFC02A88 /* Debug Register */
-/* the following is for backwards compatibility */
-#define CAN_CNF CAN_DEBUG
-
-#define CAN_STATUS 0xFFC02A8C /* Global Status Register */
-#define CAN_CEC 0xFFC02A90 /* Error Counter Register */
-#define CAN_GIS 0xFFC02A94 /* Global Interrupt Status Register */
-#define CAN_GIM 0xFFC02A98 /* Global Interrupt Mask Register */
-#define CAN_GIF 0xFFC02A9C /* Global Interrupt Flag Register */
-#define CAN_CONTROL 0xFFC02AA0 /* Master Control Register */
-#define CAN_INTR 0xFFC02AA4 /* Interrupt Pending Register */
-#define CAN_MBTD 0xFFC02AAC /* Mailbox Temporary Disable Feature */
-#define CAN_EWR 0xFFC02AB0 /* Programmable Warning Level */
-#define CAN_ESR 0xFFC02AB4 /* Error Status Register */
-#define CAN_UCCNT 0xFFC02AC4 /* Universal Counter */
-#define CAN_UCRC 0xFFC02AC8 /* Universal Counter Reload/Capture Register */
-#define CAN_UCCNF 0xFFC02ACC /* Universal Counter Configuration Register */
-
-/* Mailbox Acceptance Masks */
-#define CAN_AM00L 0xFFC02B00 /* Mailbox 0 Low Acceptance Mask */
-#define CAN_AM00H 0xFFC02B04 /* Mailbox 0 High Acceptance Mask */
-#define CAN_AM01L 0xFFC02B08 /* Mailbox 1 Low Acceptance Mask */
-#define CAN_AM01H 0xFFC02B0C /* Mailbox 1 High Acceptance Mask */
-#define CAN_AM02L 0xFFC02B10 /* Mailbox 2 Low Acceptance Mask */
-#define CAN_AM02H 0xFFC02B14 /* Mailbox 2 High Acceptance Mask */
-#define CAN_AM03L 0xFFC02B18 /* Mailbox 3 Low Acceptance Mask */
-#define CAN_AM03H 0xFFC02B1C /* Mailbox 3 High Acceptance Mask */
-#define CAN_AM04L 0xFFC02B20 /* Mailbox 4 Low Acceptance Mask */
-#define CAN_AM04H 0xFFC02B24 /* Mailbox 4 High Acceptance Mask */
-#define CAN_AM05L 0xFFC02B28 /* Mailbox 5 Low Acceptance Mask */
-#define CAN_AM05H 0xFFC02B2C /* Mailbox 5 High Acceptance Mask */
-#define CAN_AM06L 0xFFC02B30 /* Mailbox 6 Low Acceptance Mask */
-#define CAN_AM06H 0xFFC02B34 /* Mailbox 6 High Acceptance Mask */
-#define CAN_AM07L 0xFFC02B38 /* Mailbox 7 Low Acceptance Mask */
-#define CAN_AM07H 0xFFC02B3C /* Mailbox 7 High Acceptance Mask */
-#define CAN_AM08L 0xFFC02B40 /* Mailbox 8 Low Acceptance Mask */
-#define CAN_AM08H 0xFFC02B44 /* Mailbox 8 High Acceptance Mask */
-#define CAN_AM09L 0xFFC02B48 /* Mailbox 9 Low Acceptance Mask */
-#define CAN_AM09H 0xFFC02B4C /* Mailbox 9 High Acceptance Mask */
-#define CAN_AM10L 0xFFC02B50 /* Mailbox 10 Low Acceptance Mask */
-#define CAN_AM10H 0xFFC02B54 /* Mailbox 10 High Acceptance Mask */
-#define CAN_AM11L 0xFFC02B58 /* Mailbox 11 Low Acceptance Mask */
-#define CAN_AM11H 0xFFC02B5C /* Mailbox 11 High Acceptance Mask */
-#define CAN_AM12L 0xFFC02B60 /* Mailbox 12 Low Acceptance Mask */
-#define CAN_AM12H 0xFFC02B64 /* Mailbox 12 High Acceptance Mask */
-#define CAN_AM13L 0xFFC02B68 /* Mailbox 13 Low Acceptance Mask */
-#define CAN_AM13H 0xFFC02B6C /* Mailbox 13 High Acceptance Mask */
-#define CAN_AM14L 0xFFC02B70 /* Mailbox 14 Low Acceptance Mask */
-#define CAN_AM14H 0xFFC02B74 /* Mailbox 14 High Acceptance Mask */
-#define CAN_AM15L 0xFFC02B78 /* Mailbox 15 Low Acceptance Mask */
-#define CAN_AM15H 0xFFC02B7C /* Mailbox 15 High Acceptance Mask */
-
-#define CAN_AM16L 0xFFC02B80 /* Mailbox 16 Low Acceptance Mask */
-#define CAN_AM16H 0xFFC02B84 /* Mailbox 16 High Acceptance Mask */
-#define CAN_AM17L 0xFFC02B88 /* Mailbox 17 Low Acceptance Mask */
-#define CAN_AM17H 0xFFC02B8C /* Mailbox 17 High Acceptance Mask */
-#define CAN_AM18L 0xFFC02B90 /* Mailbox 18 Low Acceptance Mask */
-#define CAN_AM18H 0xFFC02B94 /* Mailbox 18 High Acceptance Mask */
-#define CAN_AM19L 0xFFC02B98 /* Mailbox 19 Low Acceptance Mask */
-#define CAN_AM19H 0xFFC02B9C /* Mailbox 19 High Acceptance Mask */
-#define CAN_AM20L 0xFFC02BA0 /* Mailbox 20 Low Acceptance Mask */
-#define CAN_AM20H 0xFFC02BA4 /* Mailbox 20 High Acceptance Mask */
-#define CAN_AM21L 0xFFC02BA8 /* Mailbox 21 Low Acceptance Mask */
-#define CAN_AM21H 0xFFC02BAC /* Mailbox 21 High Acceptance Mask */
-#define CAN_AM22L 0xFFC02BB0 /* Mailbox 22 Low Acceptance Mask */
-#define CAN_AM22H 0xFFC02BB4 /* Mailbox 22 High Acceptance Mask */
-#define CAN_AM23L 0xFFC02BB8 /* Mailbox 23 Low Acceptance Mask */
-#define CAN_AM23H 0xFFC02BBC /* Mailbox 23 High Acceptance Mask */
-#define CAN_AM24L 0xFFC02BC0 /* Mailbox 24 Low Acceptance Mask */
-#define CAN_AM24H 0xFFC02BC4 /* Mailbox 24 High Acceptance Mask */
-#define CAN_AM25L 0xFFC02BC8 /* Mailbox 25 Low Acceptance Mask */
-#define CAN_AM25H 0xFFC02BCC /* Mailbox 25 High Acceptance Mask */
-#define CAN_AM26L 0xFFC02BD0 /* Mailbox 26 Low Acceptance Mask */
-#define CAN_AM26H 0xFFC02BD4 /* Mailbox 26 High Acceptance Mask */
-#define CAN_AM27L 0xFFC02BD8 /* Mailbox 27 Low Acceptance Mask */
-#define CAN_AM27H 0xFFC02BDC /* Mailbox 27 High Acceptance Mask */
-#define CAN_AM28L 0xFFC02BE0 /* Mailbox 28 Low Acceptance Mask */
-#define CAN_AM28H 0xFFC02BE4 /* Mailbox 28 High Acceptance Mask */
-#define CAN_AM29L 0xFFC02BE8 /* Mailbox 29 Low Acceptance Mask */
-#define CAN_AM29H 0xFFC02BEC /* Mailbox 29 High Acceptance Mask */
-#define CAN_AM30L 0xFFC02BF0 /* Mailbox 30 Low Acceptance Mask */
-#define CAN_AM30H 0xFFC02BF4 /* Mailbox 30 High Acceptance Mask */
-#define CAN_AM31L 0xFFC02BF8 /* Mailbox 31 Low Acceptance Mask */
-#define CAN_AM31H 0xFFC02BFC /* Mailbox 31 High Acceptance Mask */
-
-/* CAN Acceptance Mask Macros */
-#define CAN_AM_L(x) (CAN_AM00L+((x)*0x8))
-#define CAN_AM_H(x) (CAN_AM00H+((x)*0x8))
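
The two stride macros above derive a mask address from a mailbox number, since each mailbox owns an 8-byte low/high pair. A minimal usage sketch (illustrative only): it assumes <linux/types.h> and the Blackfin bfin_write16() MMR accessor, and can_set_accept_mask() is a hypothetical helper name.

/* Program mailbox n's acceptance mask via the stride macros */
static inline void can_set_accept_mask(unsigned int n, u16 lo, u16 hi)
{
	bfin_write16(CAN_AM_L(n), lo);	/* CAN_AM00L + n*8 */
	bfin_write16(CAN_AM_H(n), hi);	/* CAN_AM00H + n*8 */
}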
-
-/* Mailbox Registers */
-#define CAN_MB00_DATA0 0xFFC02C00 /* Mailbox 0 Data Word 0 [15:0] Register */
-#define CAN_MB00_DATA1 0xFFC02C04 /* Mailbox 0 Data Word 1 [31:16] Register */
-#define CAN_MB00_DATA2 0xFFC02C08 /* Mailbox 0 Data Word 2 [47:32] Register */
-#define CAN_MB00_DATA3 0xFFC02C0C /* Mailbox 0 Data Word 3 [63:48] Register */
-#define CAN_MB00_LENGTH 0xFFC02C10 /* Mailbox 0 Data Length Code Register */
-#define CAN_MB00_TIMESTAMP 0xFFC02C14 /* Mailbox 0 Time Stamp Value Register */
-#define CAN_MB00_ID0 0xFFC02C18 /* Mailbox 0 Identifier Low Register */
-#define CAN_MB00_ID1 0xFFC02C1C /* Mailbox 0 Identifier High Register */
-
-#define CAN_MB01_DATA0 0xFFC02C20 /* Mailbox 1 Data Word 0 [15:0] Register */
-#define CAN_MB01_DATA1 0xFFC02C24 /* Mailbox 1 Data Word 1 [31:16] Register */
-#define CAN_MB01_DATA2 0xFFC02C28 /* Mailbox 1 Data Word 2 [47:32] Register */
-#define CAN_MB01_DATA3 0xFFC02C2C /* Mailbox 1 Data Word 3 [63:48] Register */
-#define CAN_MB01_LENGTH 0xFFC02C30 /* Mailbox 1 Data Length Code Register */
-#define CAN_MB01_TIMESTAMP 0xFFC02C34 /* Mailbox 1 Time Stamp Value Register */
-#define CAN_MB01_ID0 0xFFC02C38 /* Mailbox 1 Identifier Low Register */
-#define CAN_MB01_ID1 0xFFC02C3C /* Mailbox 1 Identifier High Register */
-
-#define CAN_MB02_DATA0 0xFFC02C40 /* Mailbox 2 Data Word 0 [15:0] Register */
-#define CAN_MB02_DATA1 0xFFC02C44 /* Mailbox 2 Data Word 1 [31:16] Register */
-#define CAN_MB02_DATA2 0xFFC02C48 /* Mailbox 2 Data Word 2 [47:32] Register */
-#define CAN_MB02_DATA3 0xFFC02C4C /* Mailbox 2 Data Word 3 [63:48] Register */
-#define CAN_MB02_LENGTH 0xFFC02C50 /* Mailbox 2 Data Length Code Register */
-#define CAN_MB02_TIMESTAMP 0xFFC02C54 /* Mailbox 2 Time Stamp Value Register */
-#define CAN_MB02_ID0 0xFFC02C58 /* Mailbox 2 Identifier Low Register */
-#define CAN_MB02_ID1 0xFFC02C5C /* Mailbox 2 Identifier High Register */
-
-#define CAN_MB03_DATA0 0xFFC02C60 /* Mailbox 3 Data Word 0 [15:0] Register */
-#define CAN_MB03_DATA1 0xFFC02C64 /* Mailbox 3 Data Word 1 [31:16] Register */
-#define CAN_MB03_DATA2 0xFFC02C68 /* Mailbox 3 Data Word 2 [47:32] Register */
-#define CAN_MB03_DATA3 0xFFC02C6C /* Mailbox 3 Data Word 3 [63:48] Register */
-#define CAN_MB03_LENGTH 0xFFC02C70 /* Mailbox 3 Data Length Code Register */
-#define CAN_MB03_TIMESTAMP 0xFFC02C74 /* Mailbox 3 Time Stamp Value Register */
-#define CAN_MB03_ID0 0xFFC02C78 /* Mailbox 3 Identifier Low Register */
-#define CAN_MB03_ID1 0xFFC02C7C /* Mailbox 3 Identifier High Register */
-
-#define CAN_MB04_DATA0 0xFFC02C80 /* Mailbox 4 Data Word 0 [15:0] Register */
-#define CAN_MB04_DATA1 0xFFC02C84 /* Mailbox 4 Data Word 1 [31:16] Register */
-#define CAN_MB04_DATA2 0xFFC02C88 /* Mailbox 4 Data Word 2 [47:32] Register */
-#define CAN_MB04_DATA3 0xFFC02C8C /* Mailbox 4 Data Word 3 [63:48] Register */
-#define CAN_MB04_LENGTH 0xFFC02C90 /* Mailbox 4 Data Length Code Register */
-#define CAN_MB04_TIMESTAMP 0xFFC02C94 /* Mailbox 4 Time Stamp Value Register */
-#define CAN_MB04_ID0 0xFFC02C98 /* Mailbox 4 Identifier Low Register */
-#define CAN_MB04_ID1 0xFFC02C9C /* Mailbox 4 Identifier High Register */
-
-#define CAN_MB05_DATA0 0xFFC02CA0 /* Mailbox 5 Data Word 0 [15:0] Register */
-#define CAN_MB05_DATA1 0xFFC02CA4 /* Mailbox 5 Data Word 1 [31:16] Register */
-#define CAN_MB05_DATA2 0xFFC02CA8 /* Mailbox 5 Data Word 2 [47:32] Register */
-#define CAN_MB05_DATA3 0xFFC02CAC /* Mailbox 5 Data Word 3 [63:48] Register */
-#define CAN_MB05_LENGTH 0xFFC02CB0 /* Mailbox 5 Data Length Code Register */
-#define CAN_MB05_TIMESTAMP 0xFFC02CB4 /* Mailbox 5 Time Stamp Value Register */
-#define CAN_MB05_ID0 0xFFC02CB8 /* Mailbox 5 Identifier Low Register */
-#define CAN_MB05_ID1 0xFFC02CBC /* Mailbox 5 Identifier High Register */
-
-#define CAN_MB06_DATA0 0xFFC02CC0 /* Mailbox 6 Data Word 0 [15:0] Register */
-#define CAN_MB06_DATA1 0xFFC02CC4 /* Mailbox 6 Data Word 1 [31:16] Register */
-#define CAN_MB06_DATA2 0xFFC02CC8 /* Mailbox 6 Data Word 2 [47:32] Register */
-#define CAN_MB06_DATA3 0xFFC02CCC /* Mailbox 6 Data Word 3 [63:48] Register */
-#define CAN_MB06_LENGTH 0xFFC02CD0 /* Mailbox 6 Data Length Code Register */
-#define CAN_MB06_TIMESTAMP 0xFFC02CD4 /* Mailbox 6 Time Stamp Value Register */
-#define CAN_MB06_ID0 0xFFC02CD8 /* Mailbox 6 Identifier Low Register */
-#define CAN_MB06_ID1 0xFFC02CDC /* Mailbox 6 Identifier High Register */
-
-#define CAN_MB07_DATA0 0xFFC02CE0 /* Mailbox 7 Data Word 0 [15:0] Register */
-#define CAN_MB07_DATA1 0xFFC02CE4 /* Mailbox 7 Data Word 1 [31:16] Register */
-#define CAN_MB07_DATA2 0xFFC02CE8 /* Mailbox 7 Data Word 2 [47:32] Register */
-#define CAN_MB07_DATA3 0xFFC02CEC /* Mailbox 7 Data Word 3 [63:48] Register */
-#define CAN_MB07_LENGTH 0xFFC02CF0 /* Mailbox 7 Data Length Code Register */
-#define CAN_MB07_TIMESTAMP 0xFFC02CF4 /* Mailbox 7 Time Stamp Value Register */
-#define CAN_MB07_ID0 0xFFC02CF8 /* Mailbox 7 Identifier Low Register */
-#define CAN_MB07_ID1 0xFFC02CFC /* Mailbox 7 Identifier High Register */
-
-#define CAN_MB08_DATA0 0xFFC02D00 /* Mailbox 8 Data Word 0 [15:0] Register */
-#define CAN_MB08_DATA1 0xFFC02D04 /* Mailbox 8 Data Word 1 [31:16] Register */
-#define CAN_MB08_DATA2 0xFFC02D08 /* Mailbox 8 Data Word 2 [47:32] Register */
-#define CAN_MB08_DATA3 0xFFC02D0C /* Mailbox 8 Data Word 3 [63:48] Register */
-#define CAN_MB08_LENGTH 0xFFC02D10 /* Mailbox 8 Data Length Code Register */
-#define CAN_MB08_TIMESTAMP 0xFFC02D14 /* Mailbox 8 Time Stamp Value Register */
-#define CAN_MB08_ID0 0xFFC02D18 /* Mailbox 8 Identifier Low Register */
-#define CAN_MB08_ID1 0xFFC02D1C /* Mailbox 8 Identifier High Register */
-
-#define CAN_MB09_DATA0 0xFFC02D20 /* Mailbox 9 Data Word 0 [15:0] Register */
-#define CAN_MB09_DATA1 0xFFC02D24 /* Mailbox 9 Data Word 1 [31:16] Register */
-#define CAN_MB09_DATA2 0xFFC02D28 /* Mailbox 9 Data Word 2 [47:32] Register */
-#define CAN_MB09_DATA3 0xFFC02D2C /* Mailbox 9 Data Word 3 [63:48] Register */
-#define CAN_MB09_LENGTH 0xFFC02D30 /* Mailbox 9 Data Length Code Register */
-#define CAN_MB09_TIMESTAMP 0xFFC02D34 /* Mailbox 9 Time Stamp Value Register */
-#define CAN_MB09_ID0 0xFFC02D38 /* Mailbox 9 Identifier Low Register */
-#define CAN_MB09_ID1 0xFFC02D3C /* Mailbox 9 Identifier High Register */
-
-#define CAN_MB10_DATA0 0xFFC02D40 /* Mailbox 10 Data Word 0 [15:0] Register */
-#define CAN_MB10_DATA1 0xFFC02D44 /* Mailbox 10 Data Word 1 [31:16] Register */
-#define CAN_MB10_DATA2 0xFFC02D48 /* Mailbox 10 Data Word 2 [47:32] Register */
-#define CAN_MB10_DATA3 0xFFC02D4C /* Mailbox 10 Data Word 3 [63:48] Register */
-#define CAN_MB10_LENGTH 0xFFC02D50 /* Mailbox 10 Data Length Code Register */
-#define CAN_MB10_TIMESTAMP 0xFFC02D54 /* Mailbox 10 Time Stamp Value Register */
-#define CAN_MB10_ID0 0xFFC02D58 /* Mailbox 10 Identifier Low Register */
-#define CAN_MB10_ID1 0xFFC02D5C /* Mailbox 10 Identifier High Register */
-
-#define CAN_MB11_DATA0 0xFFC02D60 /* Mailbox 11 Data Word 0 [15:0] Register */
-#define CAN_MB11_DATA1 0xFFC02D64 /* Mailbox 11 Data Word 1 [31:16] Register */
-#define CAN_MB11_DATA2 0xFFC02D68 /* Mailbox 11 Data Word 2 [47:32] Register */
-#define CAN_MB11_DATA3 0xFFC02D6C /* Mailbox 11 Data Word 3 [63:48] Register */
-#define CAN_MB11_LENGTH 0xFFC02D70 /* Mailbox 11 Data Length Code Register */
-#define CAN_MB11_TIMESTAMP 0xFFC02D74 /* Mailbox 11 Time Stamp Value Register */
-#define CAN_MB11_ID0 0xFFC02D78 /* Mailbox 11 Identifier Low Register */
-#define CAN_MB11_ID1 0xFFC02D7C /* Mailbox 11 Identifier High Register */
-
-#define CAN_MB12_DATA0 0xFFC02D80 /* Mailbox 12 Data Word 0 [15:0] Register */
-#define CAN_MB12_DATA1 0xFFC02D84 /* Mailbox 12 Data Word 1 [31:16] Register */
-#define CAN_MB12_DATA2 0xFFC02D88 /* Mailbox 12 Data Word 2 [47:32] Register */
-#define CAN_MB12_DATA3 0xFFC02D8C /* Mailbox 12 Data Word 3 [63:48] Register */
-#define CAN_MB12_LENGTH 0xFFC02D90 /* Mailbox 12 Data Length Code Register */
-#define CAN_MB12_TIMESTAMP 0xFFC02D94 /* Mailbox 12 Time Stamp Value Register */
-#define CAN_MB12_ID0 0xFFC02D98 /* Mailbox 12 Identifier Low Register */
-#define CAN_MB12_ID1 0xFFC02D9C /* Mailbox 12 Identifier High Register */
-
-#define CAN_MB13_DATA0 0xFFC02DA0 /* Mailbox 13 Data Word 0 [15:0] Register */
-#define CAN_MB13_DATA1 0xFFC02DA4 /* Mailbox 13 Data Word 1 [31:16] Register */
-#define CAN_MB13_DATA2 0xFFC02DA8 /* Mailbox 13 Data Word 2 [47:32] Register */
-#define CAN_MB13_DATA3 0xFFC02DAC /* Mailbox 13 Data Word 3 [63:48] Register */
-#define CAN_MB13_LENGTH 0xFFC02DB0 /* Mailbox 13 Data Length Code Register */
-#define CAN_MB13_TIMESTAMP 0xFFC02DB4 /* Mailbox 13 Time Stamp Value Register */
-#define CAN_MB13_ID0 0xFFC02DB8 /* Mailbox 13 Identifier Low Register */
-#define CAN_MB13_ID1 0xFFC02DBC /* Mailbox 13 Identifier High Register */
-
-#define CAN_MB14_DATA0 0xFFC02DC0 /* Mailbox 14 Data Word 0 [15:0] Register */
-#define CAN_MB14_DATA1 0xFFC02DC4 /* Mailbox 14 Data Word 1 [31:16] Register */
-#define CAN_MB14_DATA2 0xFFC02DC8 /* Mailbox 14 Data Word 2 [47:32] Register */
-#define CAN_MB14_DATA3 0xFFC02DCC /* Mailbox 14 Data Word 3 [63:48] Register */
-#define CAN_MB14_LENGTH 0xFFC02DD0 /* Mailbox 14 Data Length Code Register */
-#define CAN_MB14_TIMESTAMP 0xFFC02DD4 /* Mailbox 14 Time Stamp Value Register */
-#define CAN_MB14_ID0 0xFFC02DD8 /* Mailbox 14 Identifier Low Register */
-#define CAN_MB14_ID1 0xFFC02DDC /* Mailbox 14 Identifier High Register */
-
-#define CAN_MB15_DATA0 0xFFC02DE0 /* Mailbox 15 Data Word 0 [15:0] Register */
-#define CAN_MB15_DATA1 0xFFC02DE4 /* Mailbox 15 Data Word 1 [31:16] Register */
-#define CAN_MB15_DATA2 0xFFC02DE8 /* Mailbox 15 Data Word 2 [47:32] Register */
-#define CAN_MB15_DATA3 0xFFC02DEC /* Mailbox 15 Data Word 3 [63:48] Register */
-#define CAN_MB15_LENGTH 0xFFC02DF0 /* Mailbox 15 Data Length Code Register */
-#define CAN_MB15_TIMESTAMP 0xFFC02DF4 /* Mailbox 15 Time Stamp Value Register */
-#define CAN_MB15_ID0 0xFFC02DF8 /* Mailbox 15 Identifier Low Register */
-#define CAN_MB15_ID1 0xFFC02DFC /* Mailbox 15 Identifier High Register */
-
-#define CAN_MB16_DATA0 0xFFC02E00 /* Mailbox 16 Data Word 0 [15:0] Register */
-#define CAN_MB16_DATA1 0xFFC02E04 /* Mailbox 16 Data Word 1 [31:16] Register */
-#define CAN_MB16_DATA2 0xFFC02E08 /* Mailbox 16 Data Word 2 [47:32] Register */
-#define CAN_MB16_DATA3 0xFFC02E0C /* Mailbox 16 Data Word 3 [63:48] Register */
-#define CAN_MB16_LENGTH 0xFFC02E10 /* Mailbox 16 Data Length Code Register */
-#define CAN_MB16_TIMESTAMP 0xFFC02E14 /* Mailbox 16 Time Stamp Value Register */
-#define CAN_MB16_ID0 0xFFC02E18 /* Mailbox 16 Identifier Low Register */
-#define CAN_MB16_ID1 0xFFC02E1C /* Mailbox 16 Identifier High Register */
-
-#define CAN_MB17_DATA0 0xFFC02E20 /* Mailbox 17 Data Word 0 [15:0] Register */
-#define CAN_MB17_DATA1 0xFFC02E24 /* Mailbox 17 Data Word 1 [31:16] Register */
-#define CAN_MB17_DATA2 0xFFC02E28 /* Mailbox 17 Data Word 2 [47:32] Register */
-#define CAN_MB17_DATA3 0xFFC02E2C /* Mailbox 17 Data Word 3 [63:48] Register */
-#define CAN_MB17_LENGTH 0xFFC02E30 /* Mailbox 17 Data Length Code Register */
-#define CAN_MB17_TIMESTAMP 0xFFC02E34 /* Mailbox 17 Time Stamp Value Register */
-#define CAN_MB17_ID0 0xFFC02E38 /* Mailbox 17 Identifier Low Register */
-#define CAN_MB17_ID1 0xFFC02E3C /* Mailbox 17 Identifier High Register */
-
-#define CAN_MB18_DATA0 0xFFC02E40 /* Mailbox 18 Data Word 0 [15:0] Register */
-#define CAN_MB18_DATA1 0xFFC02E44 /* Mailbox 18 Data Word 1 [31:16] Register */
-#define CAN_MB18_DATA2 0xFFC02E48 /* Mailbox 18 Data Word 2 [47:32] Register */
-#define CAN_MB18_DATA3 0xFFC02E4C /* Mailbox 18 Data Word 3 [63:48] Register */
-#define CAN_MB18_LENGTH 0xFFC02E50 /* Mailbox 18 Data Length Code Register */
-#define CAN_MB18_TIMESTAMP 0xFFC02E54 /* Mailbox 18 Time Stamp Value Register */
-#define CAN_MB18_ID0 0xFFC02E58 /* Mailbox 18 Identifier Low Register */
-#define CAN_MB18_ID1 0xFFC02E5C /* Mailbox 18 Identifier High Register */
-
-#define CAN_MB19_DATA0 0xFFC02E60 /* Mailbox 19 Data Word 0 [15:0] Register */
-#define CAN_MB19_DATA1 0xFFC02E64 /* Mailbox 19 Data Word 1 [31:16] Register */
-#define CAN_MB19_DATA2 0xFFC02E68 /* Mailbox 19 Data Word 2 [47:32] Register */
-#define CAN_MB19_DATA3 0xFFC02E6C /* Mailbox 19 Data Word 3 [63:48] Register */
-#define CAN_MB19_LENGTH 0xFFC02E70 /* Mailbox 19 Data Length Code Register */
-#define CAN_MB19_TIMESTAMP 0xFFC02E74 /* Mailbox 19 Time Stamp Value Register */
-#define CAN_MB19_ID0 0xFFC02E78 /* Mailbox 19 Identifier Low Register */
-#define CAN_MB19_ID1 0xFFC02E7C /* Mailbox 19 Identifier High Register */
-
-#define CAN_MB20_DATA0 0xFFC02E80 /* Mailbox 20 Data Word 0 [15:0] Register */
-#define CAN_MB20_DATA1 0xFFC02E84 /* Mailbox 20 Data Word 1 [31:16] Register */
-#define CAN_MB20_DATA2 0xFFC02E88 /* Mailbox 20 Data Word 2 [47:32] Register */
-#define CAN_MB20_DATA3 0xFFC02E8C /* Mailbox 20 Data Word 3 [63:48] Register */
-#define CAN_MB20_LENGTH 0xFFC02E90 /* Mailbox 20 Data Length Code Register */
-#define CAN_MB20_TIMESTAMP 0xFFC02E94 /* Mailbox 20 Time Stamp Value Register */
-#define CAN_MB20_ID0 0xFFC02E98 /* Mailbox 20 Identifier Low Register */
-#define CAN_MB20_ID1 0xFFC02E9C /* Mailbox 20 Identifier High Register */
-
-#define CAN_MB21_DATA0 0xFFC02EA0 /* Mailbox 21 Data Word 0 [15:0] Register */
-#define CAN_MB21_DATA1 0xFFC02EA4 /* Mailbox 21 Data Word 1 [31:16] Register */
-#define CAN_MB21_DATA2 0xFFC02EA8 /* Mailbox 21 Data Word 2 [47:32] Register */
-#define CAN_MB21_DATA3 0xFFC02EAC /* Mailbox 21 Data Word 3 [63:48] Register */
-#define CAN_MB21_LENGTH 0xFFC02EB0 /* Mailbox 21 Data Length Code Register */
-#define CAN_MB21_TIMESTAMP 0xFFC02EB4 /* Mailbox 21 Time Stamp Value Register */
-#define CAN_MB21_ID0 0xFFC02EB8 /* Mailbox 21 Identifier Low Register */
-#define CAN_MB21_ID1 0xFFC02EBC /* Mailbox 21 Identifier High Register */
-
-#define CAN_MB22_DATA0 0xFFC02EC0 /* Mailbox 22 Data Word 0 [15:0] Register */
-#define CAN_MB22_DATA1 0xFFC02EC4 /* Mailbox 22 Data Word 1 [31:16] Register */
-#define CAN_MB22_DATA2 0xFFC02EC8 /* Mailbox 22 Data Word 2 [47:32] Register */
-#define CAN_MB22_DATA3 0xFFC02ECC /* Mailbox 22 Data Word 3 [63:48] Register */
-#define CAN_MB22_LENGTH 0xFFC02ED0 /* Mailbox 22 Data Length Code Register */
-#define CAN_MB22_TIMESTAMP 0xFFC02ED4 /* Mailbox 22 Time Stamp Value Register */
-#define CAN_MB22_ID0 0xFFC02ED8 /* Mailbox 22 Identifier Low Register */
-#define CAN_MB22_ID1 0xFFC02EDC /* Mailbox 22 Identifier High Register */
-
-#define CAN_MB23_DATA0 0xFFC02EE0 /* Mailbox 23 Data Word 0 [15:0] Register */
-#define CAN_MB23_DATA1 0xFFC02EE4 /* Mailbox 23 Data Word 1 [31:16] Register */
-#define CAN_MB23_DATA2 0xFFC02EE8 /* Mailbox 23 Data Word 2 [47:32] Register */
-#define CAN_MB23_DATA3 0xFFC02EEC /* Mailbox 23 Data Word 3 [63:48] Register */
-#define CAN_MB23_LENGTH 0xFFC02EF0 /* Mailbox 23 Data Length Code Register */
-#define CAN_MB23_TIMESTAMP 0xFFC02EF4 /* Mailbox 23 Time Stamp Value Register */
-#define CAN_MB23_ID0 0xFFC02EF8 /* Mailbox 23 Identifier Low Register */
-#define CAN_MB23_ID1 0xFFC02EFC /* Mailbox 23 Identifier High Register */
-
-#define CAN_MB24_DATA0 0xFFC02F00 /* Mailbox 24 Data Word 0 [15:0] Register */
-#define CAN_MB24_DATA1 0xFFC02F04 /* Mailbox 24 Data Word 1 [31:16] Register */
-#define CAN_MB24_DATA2 0xFFC02F08 /* Mailbox 24 Data Word 2 [47:32] Register */
-#define CAN_MB24_DATA3 0xFFC02F0C /* Mailbox 24 Data Word 3 [63:48] Register */
-#define CAN_MB24_LENGTH 0xFFC02F10 /* Mailbox 24 Data Length Code Register */
-#define CAN_MB24_TIMESTAMP 0xFFC02F14 /* Mailbox 24 Time Stamp Value Register */
-#define CAN_MB24_ID0 0xFFC02F18 /* Mailbox 24 Identifier Low Register */
-#define CAN_MB24_ID1 0xFFC02F1C /* Mailbox 24 Identifier High Register */
-
-#define CAN_MB25_DATA0 0xFFC02F20 /* Mailbox 25 Data Word 0 [15:0] Register */
-#define CAN_MB25_DATA1 0xFFC02F24 /* Mailbox 25 Data Word 1 [31:16] Register */
-#define CAN_MB25_DATA2 0xFFC02F28 /* Mailbox 25 Data Word 2 [47:32] Register */
-#define CAN_MB25_DATA3 0xFFC02F2C /* Mailbox 25 Data Word 3 [63:48] Register */
-#define CAN_MB25_LENGTH 0xFFC02F30 /* Mailbox 25 Data Length Code Register */
-#define CAN_MB25_TIMESTAMP 0xFFC02F34 /* Mailbox 25 Time Stamp Value Register */
-#define CAN_MB25_ID0 0xFFC02F38 /* Mailbox 25 Identifier Low Register */
-#define CAN_MB25_ID1 0xFFC02F3C /* Mailbox 25 Identifier High Register */
-
-#define CAN_MB26_DATA0 0xFFC02F40 /* Mailbox 26 Data Word 0 [15:0] Register */
-#define CAN_MB26_DATA1 0xFFC02F44 /* Mailbox 26 Data Word 1 [31:16] Register */
-#define CAN_MB26_DATA2 0xFFC02F48 /* Mailbox 26 Data Word 2 [47:32] Register */
-#define CAN_MB26_DATA3 0xFFC02F4C /* Mailbox 26 Data Word 3 [63:48] Register */
-#define CAN_MB26_LENGTH 0xFFC02F50 /* Mailbox 26 Data Length Code Register */
-#define CAN_MB26_TIMESTAMP 0xFFC02F54 /* Mailbox 26 Time Stamp Value Register */
-#define CAN_MB26_ID0 0xFFC02F58 /* Mailbox 26 Identifier Low Register */
-#define CAN_MB26_ID1 0xFFC02F5C /* Mailbox 26 Identifier High Register */
-
-#define CAN_MB27_DATA0 0xFFC02F60 /* Mailbox 27 Data Word 0 [15:0] Register */
-#define CAN_MB27_DATA1 0xFFC02F64 /* Mailbox 27 Data Word 1 [31:16] Register */
-#define CAN_MB27_DATA2 0xFFC02F68 /* Mailbox 27 Data Word 2 [47:32] Register */
-#define CAN_MB27_DATA3 0xFFC02F6C /* Mailbox 27 Data Word 3 [63:48] Register */
-#define CAN_MB27_LENGTH 0xFFC02F70 /* Mailbox 27 Data Length Code Register */
-#define CAN_MB27_TIMESTAMP 0xFFC02F74 /* Mailbox 27 Time Stamp Value Register */
-#define CAN_MB27_ID0 0xFFC02F78 /* Mailbox 27 Identifier Low Register */
-#define CAN_MB27_ID1 0xFFC02F7C /* Mailbox 27 Identifier High Register */
-
-#define CAN_MB28_DATA0 0xFFC02F80 /* Mailbox 28 Data Word 0 [15:0] Register */
-#define CAN_MB28_DATA1 0xFFC02F84 /* Mailbox 28 Data Word 1 [31:16] Register */
-#define CAN_MB28_DATA2 0xFFC02F88 /* Mailbox 28 Data Word 2 [47:32] Register */
-#define CAN_MB28_DATA3 0xFFC02F8C /* Mailbox 28 Data Word 3 [63:48] Register */
-#define CAN_MB28_LENGTH 0xFFC02F90 /* Mailbox 28 Data Length Code Register */
-#define CAN_MB28_TIMESTAMP 0xFFC02F94 /* Mailbox 28 Time Stamp Value Register */
-#define CAN_MB28_ID0 0xFFC02F98 /* Mailbox 28 Identifier Low Register */
-#define CAN_MB28_ID1 0xFFC02F9C /* Mailbox 28 Identifier High Register */
-
-#define CAN_MB29_DATA0 0xFFC02FA0 /* Mailbox 29 Data Word 0 [15:0] Register */
-#define CAN_MB29_DATA1 0xFFC02FA4 /* Mailbox 29 Data Word 1 [31:16] Register */
-#define CAN_MB29_DATA2 0xFFC02FA8 /* Mailbox 29 Data Word 2 [47:32] Register */
-#define CAN_MB29_DATA3 0xFFC02FAC /* Mailbox 29 Data Word 3 [63:48] Register */
-#define CAN_MB29_LENGTH 0xFFC02FB0 /* Mailbox 29 Data Length Code Register */
-#define CAN_MB29_TIMESTAMP 0xFFC02FB4 /* Mailbox 29 Time Stamp Value Register */
-#define CAN_MB29_ID0 0xFFC02FB8 /* Mailbox 29 Identifier Low Register */
-#define CAN_MB29_ID1 0xFFC02FBC /* Mailbox 29 Identifier High Register */
-
-#define CAN_MB30_DATA0 0xFFC02FC0 /* Mailbox 30 Data Word 0 [15:0] Register */
-#define CAN_MB30_DATA1 0xFFC02FC4 /* Mailbox 30 Data Word 1 [31:16] Register */
-#define CAN_MB30_DATA2 0xFFC02FC8 /* Mailbox 30 Data Word 2 [47:32] Register */
-#define CAN_MB30_DATA3 0xFFC02FCC /* Mailbox 30 Data Word 3 [63:48] Register */
-#define CAN_MB30_LENGTH 0xFFC02FD0 /* Mailbox 30 Data Length Code Register */
-#define CAN_MB30_TIMESTAMP 0xFFC02FD4 /* Mailbox 30 Time Stamp Value Register */
-#define CAN_MB30_ID0 0xFFC02FD8 /* Mailbox 30 Identifier Low Register */
-#define CAN_MB30_ID1 0xFFC02FDC /* Mailbox 30 Identifier High Register */
-
-#define CAN_MB31_DATA0 0xFFC02FE0 /* Mailbox 31 Data Word 0 [15:0] Register */
-#define CAN_MB31_DATA1 0xFFC02FE4 /* Mailbox 31 Data Word 1 [31:16] Register */
-#define CAN_MB31_DATA2 0xFFC02FE8 /* Mailbox 31 Data Word 2 [47:32] Register */
-#define CAN_MB31_DATA3 0xFFC02FEC /* Mailbox 31 Data Word 3 [63:48] Register */
-#define CAN_MB31_LENGTH 0xFFC02FF0 /* Mailbox 31 Data Length Code Register */
-#define CAN_MB31_TIMESTAMP 0xFFC02FF4 /* Mailbox 31 Time Stamp Value Register */
-#define CAN_MB31_ID0 0xFFC02FF8 /* Mailbox 31 Identifier Low Register */
-#define CAN_MB31_ID1 0xFFC02FFC /* Mailbox 31 Identifier High Register */
-
-/* CAN Mailbox Area Macros */
-#define CAN_MB_ID1(x) (CAN_MB00_ID1+((x)*0x20))
-#define CAN_MB_ID0(x) (CAN_MB00_ID0+((x)*0x20))
-#define CAN_MB_TIMESTAMP(x) (CAN_MB00_TIMESTAMP+((x)*0x20))
-#define CAN_MB_LENGTH(x) (CAN_MB00_LENGTH+((x)*0x20))
-#define CAN_MB_DATA3(x) (CAN_MB00_DATA3+((x)*0x20))
-#define CAN_MB_DATA2(x) (CAN_MB00_DATA2+((x)*0x20))
-#define CAN_MB_DATA1(x) (CAN_MB00_DATA1+((x)*0x20))
-#define CAN_MB_DATA0(x) (CAN_MB00_DATA0+((x)*0x20))
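
Each mailbox occupies a 0x20-byte window, so the macros above reach any mailbox register from the mailbox-0 base. A sketch of copying out a received frame, under the same accessor assumptions as above; can_read_msg() is a hypothetical name.

/* Copy mailbox n's four 16-bit data words; the DATAx registers sit 4 bytes apart */
static inline int can_read_msg(unsigned int n, u16 data[4])
{
	int i;

	for (i = 0; i < 4; i++)
		data[i] = bfin_read16(CAN_MB_DATA0(n) + i * 4);
	return bfin_read16(CAN_MB_LENGTH(n)) & 0xF;	/* data length code */
}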
-
-
-/******************************************************************************* */
-/* System MMR Register Bits and Macros */
-/******************************************************************************* */
-
-/* SWRST Mask */
-#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
-#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
-#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
-#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
-#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
-
-/* SYSCR Masks */
-#define BMODE 0x0006 /* Boot Mode - Latched During HW Reset From Mode Pins */
-#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
-
-
-/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
-
-/* Peripheral Masks For SIC0_ISR, SIC0_IWR, SIC0_IMASK */
-#define PLL_WAKEUP_IRQ 0x00000001 /* PLL Wakeup Interrupt Request */
-#define DMAC0_ERR_IRQ 0x00000002 /* DMA Controller 0 Error Interrupt Request */
-#define PPI_ERR_IRQ 0x00000004 /* PPI Error Interrupt Request */
-#define SPORT0_ERR_IRQ 0x00000008 /* SPORT0 Error Interrupt Request */
-#define SPORT1_ERR_IRQ 0x00000010 /* SPORT1 Error Interrupt Request */
-#define SPI0_ERR_IRQ 0x00000020 /* SPI0 Error Interrupt Request */
-#define UART0_ERR_IRQ 0x00000040 /* UART0 Error Interrupt Request */
-#define RTC_IRQ 0x00000080 /* Real-Time Clock Interrupt Request */
-#define DMA0_IRQ 0x00000100 /* DMA Channel 0 (PPI) Interrupt Request */
-#define DMA1_IRQ 0x00000200 /* DMA Channel 1 (SPORT0 RX) Interrupt Request */
-#define DMA2_IRQ 0x00000400 /* DMA Channel 2 (SPORT0 TX) Interrupt Request */
-#define DMA3_IRQ 0x00000800 /* DMA Channel 3 (SPORT1 RX) Interrupt Request */
-#define DMA4_IRQ 0x00001000 /* DMA Channel 4 (SPORT1 TX) Interrupt Request */
-#define DMA5_IRQ 0x00002000 /* DMA Channel 5 (SPI) Interrupt Request */
-#define DMA6_IRQ 0x00004000 /* DMA Channel 6 (UART RX) Interrupt Request */
-#define DMA7_IRQ 0x00008000 /* DMA Channel 7 (UART TX) Interrupt Request */
-#define TIMER0_IRQ 0x00010000 /* Timer 0 Interrupt Request */
-#define TIMER1_IRQ 0x00020000 /* Timer 1 Interrupt Request */
-#define TIMER2_IRQ 0x00040000 /* Timer 2 Interrupt Request */
-#define PFA_IRQ 0x00080000 /* Programmable Flag Interrupt Request A */
-#define PFB_IRQ 0x00100000 /* Programmable Flag Interrupt Request B */
-#define MDMA0_0_IRQ 0x00200000 /* MemDMA0 Stream 0 Interrupt Request */
-#define MDMA0_1_IRQ 0x00400000 /* MemDMA0 Stream 1 Interrupt Request */
-#define WDOG_IRQ 0x00800000 /* Software Watchdog Timer Interrupt Request */
-#define DMAC1_ERR_IRQ 0x01000000 /* DMA Controller 1 Error Interrupt Request */
-#define SPORT2_ERR_IRQ 0x02000000 /* SPORT2 Error Interrupt Request */
-#define SPORT3_ERR_IRQ 0x04000000 /* SPORT3 Error Interrupt Request */
-#define MXVR_SD_IRQ 0x08000000 /* MXVR Synchronous Data Interrupt Request */
-#define SPI1_ERR_IRQ 0x10000000 /* SPI1 Error Interrupt Request */
-#define SPI2_ERR_IRQ 0x20000000 /* SPI2 Error Interrupt Request */
-#define UART1_ERR_IRQ 0x40000000 /* UART1 Error Interrupt Request */
-#define UART2_ERR_IRQ 0x80000000 /* UART2 Error Interrupt Request */
-
-/* the following are for backwards compatibility */
-#define DMA0_ERR_IRQ DMAC0_ERR_IRQ
-#define DMA1_ERR_IRQ DMAC1_ERR_IRQ
-
-
-/* Peripheral Masks For SIC_ISR1, SIC_IWR1, SIC_IMASK1 */
-#define CAN_ERR_IRQ 0x00000001 /* CAN Error Interrupt Request */
-#define DMA8_IRQ 0x00000002 /* DMA Channel 8 (SPORT2 RX) Interrupt Request */
-#define DMA9_IRQ 0x00000004 /* DMA Channel 9 (SPORT2 TX) Interrupt Request */
-#define DMA10_IRQ 0x00000008 /* DMA Channel 10 (SPORT3 RX) Interrupt Request */
-#define DMA11_IRQ 0x00000010 /* DMA Channel 11 (SPORT3 TX) Interrupt Request */
-#define DMA12_IRQ 0x00000020 /* DMA Channel 12 Interrupt Request */
-#define DMA13_IRQ 0x00000040 /* DMA Channel 13 Interrupt Request */
-#define DMA14_IRQ 0x00000080 /* DMA Channel 14 (SPI1) Interrupt Request */
-#define DMA15_IRQ 0x00000100 /* DMA Channel 15 (SPI2) Interrupt Request */
-#define DMA16_IRQ 0x00000200 /* DMA Channel 16 (UART1 RX) Interrupt Request */
-#define DMA17_IRQ 0x00000400 /* DMA Channel 17 (UART1 TX) Interrupt Request */
-#define DMA18_IRQ 0x00000800 /* DMA Channel 18 (UART2 RX) Interrupt Request */
-#define DMA19_IRQ 0x00001000 /* DMA Channel 19 (UART2 TX) Interrupt Request */
-#define TWI0_IRQ 0x00002000 /* TWI0 Interrupt Request */
-#define TWI1_IRQ 0x00004000 /* TWI1 Interrupt Request */
-#define CAN_RX_IRQ 0x00008000 /* CAN Receive Interrupt Request */
-#define CAN_TX_IRQ 0x00010000 /* CAN Transmit Interrupt Request */
-#define MDMA1_0_IRQ 0x00020000 /* MemDMA1 Stream 0 Interrupt Request */
-#define MDMA1_1_IRQ 0x00040000 /* MemDMA1 Stream 1 Interrupt Request */
-#define MXVR_STAT_IRQ 0x00080000 /* MXVR Status Interrupt Request */
-#define MXVR_CM_IRQ 0x00100000 /* MXVR Control Message Interrupt Request */
-#define MXVR_AP_IRQ 0x00200000 /* MXVR Asynchronous Packet Interrupt Request */
-
-/* the following are for backwards compatibility */
-#define MDMA0_IRQ MDMA1_0_IRQ
-#define MDMA1_IRQ MDMA1_1_IRQ
-
-#ifdef _MISRA_RULES
-#define _MF15 0xFu
-#define _MF7 7u
-#else
-#define _MF15 0xF
-#define _MF7 7
-#endif /* _MISRA_RULES */
-
-/* SIC_IMASKx Masks */
-#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
-#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
-#ifdef _MISRA_RULES
-#define SIC_MASK(x) (1 << ((x)&0x1Fu)) /* Mask Peripheral #x interrupt */
-#define SIC_UNMASK(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Unmask Peripheral #x interrupt */
-#else
-#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
-#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
-#endif /* _MISRA_RULES */
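
SIC_MASK()/SIC_UNMASK() build a single-bit pattern from a peripheral number taken modulo 32; the result is meant to be OR'd into or AND'ed with an existing SIC_IMASKx value, not written alone. A sketch against the SIC0_IMASK register named earlier in this section, assuming the Blackfin bfin_read32()/bfin_write32() accessors; the helper names are hypothetical and follow the macro comments above.

/* Apply the mask/unmask patterns to SIC0_IMASK, per the macro comments */
static inline void sic0_mask_irq(unsigned int x)
{
	bfin_write32(SIC0_IMASK, bfin_read32(SIC0_IMASK) | SIC_MASK(x));
}

static inline void sic0_unmask_irq(unsigned int x)
{
	bfin_write32(SIC0_IMASK, bfin_read32(SIC0_IMASK) & SIC_UNMASK(x));
}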
-
-/* SIC_IWRx Masks */
-#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
-#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
-#ifdef _MISRA_RULES
-#define IWR_ENABLE(x) (1 << ((x)&0x1Fu)) /* Wakeup Enable Peripheral #x */
-#define IWR_DISABLE(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Wakeup Disable Peripheral #x */
-#else
-#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
-#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
-#endif /* _MISRA_RULES */
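
The wakeup macros follow the same single-bit pattern for SIC_IWRx. A sketch that lets only one peripheral wake the core, under the same accessor assumptions; sic_wakeup_only() is a hypothetical name.

/* Allow only peripheral #x to wake the core (all other wakeups off) */
static inline void sic_wakeup_only(unsigned int x)
{
	bfin_write32(SIC0_IWR, IWR_DISABLE_ALL | IWR_ENABLE(x));
}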
-
-
-/* ***************************** UART CONTROLLER MASKS ********************** */
-/* UARTx_LCR Register */
-#ifdef _MISRA_RULES
-#define WLS(x) (((x)-5u) & 0x03u) /* Word Length Select */
-#else
-#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
-#endif /* _MISRA_RULES */
-#define STB 0x04 /* Stop Bits */
-#define PEN 0x08 /* Parity Enable */
-#define EPS 0x10 /* Even Parity Select */
-#define STP 0x20 /* Stick Parity */
-#define SB 0x40 /* Set Break */
-#define DLAB 0x80 /* Divisor Latch Access */
-
-#define DLAB_P 0x07
-#define SB_P 0x06
-#define STP_P 0x05
-#define EPS_P 0x04
-#define PEN_P 0x03
-#define STB_P 0x02
-#define WLS_P1 0x01
-#define WLS_P0 0x00
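
WLS() folds a word length of 5 to 8 bits into the two low LCR bits, so a full line setup is one OR of the masks above. A sketch assuming bfin_write16() and a UART0_LCR MMR defined elsewhere in this family's headers.

/* 8 data bits, no parity, 1 stop bit */
static inline void uart0_set_8n1(void)
{
	bfin_write16(UART0_LCR, WLS(8));	/* WLS(8) == 0x03 */
}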
-
-/* UARTx_MCR Register */
-#define LOOP_ENA 0x10 /* Loopback Mode Enable */
-#define LOOP_ENA_P 0x04
-/* Deprecated UARTx_MCR Mask */
-
-/* UARTx_LSR Register */
-#define DR 0x01 /* Data Ready */
-#define OE 0x02 /* Overrun Error */
-#define PE 0x04 /* Parity Error */
-#define FE 0x08 /* Framing Error */
-#define BI 0x10 /* Break Interrupt */
-#define THRE 0x20 /* THR Empty */
-#define TEMT 0x40 /* TSR and UART_THR Empty */
-
-#define TEMP_P 0x06 /* TEMT bit position */
-#define THRE_P 0x05
-#define BI_P 0x04
-#define FE_P 0x03
-#define PE_P 0x02
-#define OE_P 0x01
-#define DR_P 0x00
-
-/* UARTx_IER Register */
-#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
-#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI 0x04 /* Enable RX Status Interrupt */
-
-#define ELSI_P 0x02
-#define ETBEI_P 0x01
-#define ERBFI_P 0x00
-
-/* UARTx_IIR Register */
-#define NINT 0x01 /* No Interrupt Pending */
-#define STATUS_P1 0x02
-#define STATUS_P0 0x01
-#define NINT_P 0x00
-
-/* UARTx_GCTL Register */
-#define UCEN 0x01 /* Enable UARTx Clocks */
-#define IREN 0x02 /* Enable IrDA Mode */
-#define TPOLC 0x04 /* IrDA TX Polarity Change */
-#define RPOLC 0x08 /* IrDA RX Polarity Change */
-#define FPE 0x10 /* Force Parity Error On Transmit */
-#define FFE 0x20 /* Force Framing Error On Transmit */
-
-#define FFE_P 0x05
-#define FPE_P 0x04
-#define RPOLC_P 0x03
-#define TPOLC_P 0x02
-#define IREN_P 0x01
-#define UCEN_P 0x00
-
-
-/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
-/* PPI_CONTROL Masks */
-#define PORT_EN 0x0001 /* PPI Port Enable */
-#define PORT_DIR 0x0002 /* PPI Port Direction */
-#define XFR_TYPE 0x000C /* PPI Transfer Type */
-#define PORT_CFG 0x0030 /* PPI Port Configuration */
-#define FLD_SEL 0x0040 /* PPI Active Field Select */
-#define PACK_EN 0x0080 /* PPI Packing Mode */
-/* previous versions of defBF539.h erroneously included DMA32 (PPI 32-bit DMA Enable) */
-#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
-#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
-#define DLENGTH 0x3800 /* PPI Data Length */
-#define DLEN_8 0x0 /* Data Length = 8 Bits */
-#define DLEN_10 0x0800 /* Data Length = 10 Bits */
-#define DLEN_11 0x1000 /* Data Length = 11 Bits */
-#define DLEN_12 0x1800 /* Data Length = 12 Bits */
-#define DLEN_13 0x2000 /* Data Length = 13 Bits */
-#define DLEN_14 0x2800 /* Data Length = 14 Bits */
-#define DLEN_15 0x3000 /* Data Length = 15 Bits */
-#define DLEN_16 0x3800 /* Data Length = 16 Bits */
-#ifdef _MISRA_RULES
-#define DLEN(x) ((((x)-9u) & 0x07u) << 11) /* PPI Data Length (only works for x=10-->x=16) */
-#else
-#define DLEN(x) ((((x)-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */
-#endif /* _MISRA_RULES */
-#define POL 0xC000 /* PPI Signal Polarities */
-#define POLC 0x4000 /* PPI Clock Polarity */
-#define POLS 0x8000 /* PPI Frame Sync Polarity */
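
DLEN() packs a 10- to 16-bit data width into bits 13:11 (8-bit mode is the zero encoding, per DLEN_8). A receive-mode sketch assuming bfin_write16() and the PPI_CONTROL register from this section; ppi_setup_rx16() is a hypothetical name.

/* Enable PPI: receive direction (PORT_DIR clear), 16-bit data, packing on */
static inline void ppi_setup_rx16(void)
{
	bfin_write16(PPI_CONTROL, PORT_EN | PACK_EN | DLEN(16));
}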
-
-
-/* PPI_STATUS Masks */
-#define FLD 0x0400 /* Field Indicator */
-#define FT_ERR 0x0800 /* Frame Track Error */
-#define OVR 0x1000 /* FIFO Overflow Error */
-#define UNDR 0x2000 /* FIFO Underrun Error */
-#define ERR_DET 0x4000 /* Error Detected Indicator */
-#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
-
-
-/* ********** DMA CONTROLLER MASKS ***********************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
-
-#define CTYPE 0x0040 /* DMA Channel Type Indicator */
-#define CTYPE_P 0x6 /* DMA Channel Type Indicator BIT POSITION */
-#define PCAP8 0x0080 /* DMA 8-bit Operation Indicator */
-#define PCAP16 0x0100 /* DMA 16-bit Operation Indicator */
-#define PCAP32 0x0200 /* DMA 32-bit Operation Indicator */
-#define PCAPWR 0x0400 /* DMA Write Operation Indicator */
-#define PCAPRD 0x0800 /* DMA Read Operation Indicator */
-#define PMAP 0xF000 /* DMA Peripheral Map Field */
-
-/* PMAP Encodings For DMA Controller 0 */
-#define PMAP_PPI 0x0000 /* PMAP PPI Port DMA */
-#define PMAP_SPORT0RX 0x1000 /* PMAP SPORT0 Receive DMA */
-#define PMAP_SPORT0TX 0x2000 /* PMAP SPORT0 Transmit DMA */
-#define PMAP_SPORT1RX 0x3000 /* PMAP SPORT1 Receive DMA */
-#define PMAP_SPORT1TX 0x4000 /* PMAP SPORT1 Transmit DMA */
-#define PMAP_SPI0 0x5000 /* PMAP SPI DMA */
-#define PMAP_UART0RX 0x6000 /* PMAP UART Receive DMA */
-#define PMAP_UART0TX 0x7000 /* PMAP UART Transmit DMA */
-
-/* PMAP Encodings For DMA Controller 1 */
-#define PMAP_SPORT2RX 0x0000 /* PMAP SPORT2 Receive DMA */
-#define PMAP_SPORT2TX 0x1000 /* PMAP SPORT2 Transmit DMA */
-#define PMAP_SPORT3RX 0x2000 /* PMAP SPORT3 Receive DMA */
-#define PMAP_SPORT3TX 0x3000 /* PMAP SPORT3 Transmit DMA */
-#define PMAP_SPI1 0x6000 /* PMAP SPI1 DMA */
-#define PMAP_SPI2 0x7000 /* PMAP SPI2 DMA */
-#define PMAP_UART1RX 0x8000 /* PMAP UART1 Receive DMA */
-#define PMAP_UART1TX 0x9000 /* PMAP UART1 Transmit DMA */
-#define PMAP_UART2RX 0xA000 /* PMAP UART2 Receive DMA */
-#define PMAP_UART2TX 0xB000 /* PMAP UART2 Transmit DMA */
-
-
-/* ************* GENERAL PURPOSE TIMER MASKS ******************** */
-/* PWM Timer bit definitions */
-/* TIMER_ENABLE Register */
-#define TIMEN0 0x0001 /* Enable Timer 0 */
-#define TIMEN1 0x0002 /* Enable Timer 1 */
-#define TIMEN2 0x0004 /* Enable Timer 2 */
-
-#define TIMEN0_P 0x00
-#define TIMEN1_P 0x01
-#define TIMEN2_P 0x02
-
-/* TIMER_DISABLE Register */
-#define TIMDIS0 0x0001 /* Disable Timer 0 */
-#define TIMDIS1 0x0002 /* Disable Timer 1 */
-#define TIMDIS2 0x0004 /* Disable Timer 2 */
-
-#define TIMDIS0_P 0x00
-#define TIMDIS1_P 0x01
-#define TIMDIS2_P 0x02
-
-/* TIMER_STATUS Register */
-#define TIMIL0 0x0001 /* Timer 0 Interrupt */
-#define TIMIL1 0x0002 /* Timer 1 Interrupt */
-#define TIMIL2 0x0004 /* Timer 2 Interrupt */
-#define TOVF_ERR0 0x0010 /* Timer 0 Counter Overflow */
-#define TOVF_ERR1 0x0020 /* Timer 1 Counter Overflow */
-#define TOVF_ERR2 0x0040 /* Timer 2 Counter Overflow */
-#define TRUN0 0x1000 /* Timer 0 Slave Enable Status */
-#define TRUN1 0x2000 /* Timer 1 Slave Enable Status */
-#define TRUN2 0x4000 /* Timer 2 Slave Enable Status */
-
-#define TIMIL0_P 0x00
-#define TIMIL1_P 0x01
-#define TIMIL2_P 0x02
-#define TOVF_ERR0_P 0x04
-#define TOVF_ERR1_P 0x05
-#define TOVF_ERR2_P 0x06
-#define TRUN0_P 0x0C
-#define TRUN1_P 0x0D
-#define TRUN2_P 0x0E
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR0_P TOVF_ERR0_P
-#define TOVL_ERR1_P TOVF_ERR1_P
-#define TOVL_ERR2_P TOVF_ERR2_P
-
-/* TIMERx_CONFIG Registers */
-#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
-#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
-#define EXT_CLK 0x0003 /* External Clock Mode */
-#define PULSE_HI 0x0004 /* Action Pulse Is Active High */
-#define PERIOD_CNT 0x0008 /* Period Count */
-#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
-#define TIN_SEL 0x0020 /* Timer Input Select */
-#define OUT_DIS 0x0040 /* Output Pad Disable */
-#define CLK_SEL 0x0080 /* Timer Clock Select */
-#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
-#define EMU_RUN 0x0200 /* Emulation Behavior Select */
-#ifdef _MISRA_RULES
-#define ERR_TYP(x) (((x) & 0x03u) << 14)
-#else
-#define ERR_TYP(x) (((x) & 0x03) << 14)
-#endif /* _MISRA_RULES */
-
-#define TMODE_P0 0x00
-#define TMODE_P1 0x01
-#define PULSE_HI_P 0x02
-#define PERIOD_CNT_P 0x03
-#define IRQ_ENA_P 0x04
-#define TIN_SEL_P 0x05
-#define OUT_DIS_P 0x06
-#define CLK_SEL_P 0x07
-#define TOGGLE_HI_P 0x08
-#define EMU_RUN_P 0x09
-#define ERR_TYP_P0 0x0E
-#define ERR_TYP_P1 0x0F
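
A timer is typically configured by ORing one TMODE value (PWM_OUT, WDTH_CAP, or EXT_CLK) with the option bits above; ERR_TYP() fills bits 15:14. A sketch assuming bfin_write16() and a TIMER0_CONFIG MMR defined with the other timer registers.

/* PWM output mode, count the period, interrupt when the period expires */
static inline void timer0_pwm_irq(void)
{
	bfin_write16(TIMER0_CONFIG, PWM_OUT | PULSE_HI | PERIOD_CNT | IRQ_ENA);
}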
-
-
-/* ****************** GENERAL-PURPOSE I/O ********************* */
-/* Flag I/O (FIO_) Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* PORT F BIT POSITIONS */
-#define PF0_P 0x0
-#define PF1_P 0x1
-#define PF2_P 0x2
-#define PF3_P 0x3
-#define PF4_P 0x4
-#define PF5_P 0x5
-#define PF6_P 0x6
-#define PF7_P 0x7
-#define PF8_P 0x8
-#define PF9_P 0x9
-#define PF10_P 0xA
-#define PF11_P 0xB
-#define PF12_P 0xC
-#define PF13_P 0xD
-#define PF14_P 0xE
-#define PF15_P 0xF
-
-
-/******************* GPIO MASKS *********************/
-/* Port C Masks */
-#define PC0 0x0001
-#define PC1 0x0002
-#define PC4 0x0010
-#define PC5 0x0020
-#define PC6 0x0040
-#define PC7 0x0080
-#define PC8 0x0100
-#define PC9 0x0200
-/* Port C Bit Positions */
-#define PC0_P 0x0
-#define PC1_P 0x1
-#define PC4_P 0x4
-#define PC5_P 0x5
-#define PC6_P 0x6
-#define PC7_P 0x7
-#define PC8_P 0x8
-#define PC9_P 0x9
-
-/* Port D */
-#define PD0 0x0001
-#define PD1 0x0002
-#define PD2 0x0004
-#define PD3 0x0008
-#define PD4 0x0010
-#define PD5 0x0020
-#define PD6 0x0040
-#define PD7 0x0080
-#define PD8 0x0100
-#define PD9 0x0200
-#define PD10 0x0400
-#define PD11 0x0800
-#define PD12 0x1000
-#define PD13 0x2000
-#define PD14 0x4000
-#define PD15 0x8000
-/* Port D Bit Positions */
-#define PD0_P 0x0
-#define PD1_P 0x1
-#define PD2_P 0x2
-#define PD3_P 0x3
-#define PD4_P 0x4
-#define PD5_P 0x5
-#define PD6_P 0x6
-#define PD7_P 0x7
-#define PD8_P 0x8
-#define PD9_P 0x9
-#define PD10_P 0xA
-#define PD11_P 0xB
-#define PD12_P 0xC
-#define PD13_P 0xD
-#define PD14_P 0xE
-#define PD15_P 0xF
-
-/* Port E */
-#define PE0 0x0001
-#define PE1 0x0002
-#define PE2 0x0004
-#define PE3 0x0008
-#define PE4 0x0010
-#define PE5 0x0020
-#define PE6 0x0040
-#define PE7 0x0080
-#define PE8 0x0100
-#define PE9 0x0200
-#define PE10 0x0400
-#define PE11 0x0800
-#define PE12 0x1000
-#define PE13 0x2000
-#define PE14 0x4000
-#define PE15 0x8000
-/* Port E Bit Positions */
-#define PE0_P 0x0
-#define PE1_P 0x1
-#define PE2_P 0x2
-#define PE3_P 0x3
-#define PE4_P 0x4
-#define PE5_P 0x5
-#define PE6_P 0x6
-#define PE7_P 0x7
-#define PE8_P 0x8
-#define PE9_P 0x9
-#define PE10_P 0xA
-#define PE11_P 0xB
-#define PE12_P 0xC
-#define PE13_P 0xD
-#define PE14_P 0xE
-#define PE15_P 0xF
-
-/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
-/* EBIU_AMGCTL Masks */
-#define AMCKEN 0x0001 /* Enable CLKOUT */
-#define AMBEN_NONE 0x0000 /* All Banks Disabled */
-#define AMBEN_B0 0x0002 /* Enable Asynchronous Memory Bank 0 only */
-#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */
-#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */
-#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */
-#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */
-
-/* EBIU_AMGCTL Bit Positions */
-#define AMCKEN_P 0x0000 /* Enable CLKOUT */
-#define AMBEN_P0 0x0001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
-#define AMBEN_P1 0x0002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0-3 enabled */
-#define AMBEN_P2 0x0003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */
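
AMBEN is a 3-bit field, so the bank-enable values above are mutually exclusive encodings rather than OR-able bits; only AMCKEN and CDPRIO combine with them. A sketch assuming bfin_write16() and the EBIU_AMGCTL register named in this section; amc_enable_b0_b1() is a hypothetical name.

/* CLKOUT on, async banks 0 and 1 enabled, DMA wins external-bus arbitration */
static inline void amc_enable_b0_b1(void)
{
	bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_B0_B1 | CDPRIO);
}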
-
-/* EBIU_AMBCTL0 Masks */
-#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */
-#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */
-#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */
-#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */
-#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */
-#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */
-#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
-#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
-#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
-#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
-#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */
-#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */
-#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */
-#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */
-#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */
-#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */
-#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */
-#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */
-#define B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */
-#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */
-#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */
-#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */
-#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */
-#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */
-#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */
-#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */
-#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */
-#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */
-#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */
-#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */
-#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */
-#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */
-#define B0WAT_4 0x00004000 /* Bank 0 Write Access Time = 4 cycles */
-#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */
-#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */
-#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */
-#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */
-#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */
-#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */
-#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */
-#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */
-#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */
-#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */
-#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */
-#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */
-#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */
-#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */
-#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */
-#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */
-#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */
-#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
-#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
-#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
-#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
-#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */
-#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */
-#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */
-#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */
-#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */
-#define B1RAT_6 0x06000000 /* Bank 1 Read Access Time = 6 cycles */
-#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */
-#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */
-#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */
-#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */
-#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */
-#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */
-#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */
-#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */
-#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */
-#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */
-#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */
-#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */
-#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */
-#define B1WAT_5 0x50000000 /* Bank 1 Write Access Time = 5 cycles */
-#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */
-#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */
-#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */
-#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */
-#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */
-#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */
-#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */
-#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */
-#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */
-#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */
-
-/* EBIU_AMBCTL1 Masks */
-#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */
-#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */
-#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */
-#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */
-#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */
-#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */
-#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
-#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
-#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
-#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
-#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */
-#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */
-#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */
-#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */
-#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */
-#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */
-#define B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */
-#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */
-#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */
-#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */
-#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */
-#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */
-#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */
-#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */
-#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */
-#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */
-#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */
-#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */
-#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */
-#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */
-#define B2WAT_6 0x00006000 /* Bank 2 Write Access Time = 6 cycles */
-#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */
-#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */
-#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */
-#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */
-#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */
-#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */
-#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */
-#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */
-#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */
-#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */
-#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */
-#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */
-#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */
-#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */
-#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */
-#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
-#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
-#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
-#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
-#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */
-#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */
-#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */
-#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */
-#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */
-#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */
-#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */
-#define B3RAT_8 0x08000000 /* Bank 3 Read Access Time = 8 cycles */
-#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */
-#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */
-#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */
-#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */
-#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */
-#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */
-#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */
-#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */
-#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */
-#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */
-#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */
-#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */
-#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */
-#define B3WAT_7 0x70000000 /* Bank 3 Write Access Time = 7 cycles */
-#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */
-#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */
-#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */
-#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */
-#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */
-#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */
-#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */
-#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */
-
-/* ********************** SDRAM CONTROLLER MASKS *************************** */
-/* EBIU_SDGCTL Masks */
-#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
-#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */
-#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */
-#define PFE 0x00000010 /* Enable SDRAM prefetch */
-#define PFP 0x00000020 /* Prefetch has priority over AMC requests */
-#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
-#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
-#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
-#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
-#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
-#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
-#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
-#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
-#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
-#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
-#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
-#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
-#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
-#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
-#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
-#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
-#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
-#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
-#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
-#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
-#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
-#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
-#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
-#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
-#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
-#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
-#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
-#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
-#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
-#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
-#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
-#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
-#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
-#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
-#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
-#define PUPSD 0x00200000 /* Power-up start delay */
-#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */
-#define PSS 0x00800000 /* Enable SDRAM power-up sequence on next SDRAM access */
-#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */
-#define EBUFE 0x02000000 /* Enable external buffering timing */
-#define FBBRW 0x04000000 /* Fast back-to-back read write enable */
-#define EMREN 0x10000000 /* Extended mode register enable */
-#define TCSR 0x20000000 /* Temp compensated self refresh value 85 deg C */
-#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */
-
-/* EBIU_SDBCTL Masks */
-#define EBE 0x00000001 /* Enable SDRAM external bank */
-#define EBSZ_16 0x00000000 /* SDRAM external bank size = 16MB */
-#define EBSZ_32 0x00000002 /* SDRAM external bank size = 32MB */
-#define EBSZ_64 0x00000004 /* SDRAM external bank size = 64MB */
-#define EBSZ_128 0x00000006 /* SDRAM external bank size = 128MB */
-#define EBSZ_256 0x00000008 /* SDRAM External Bank Size = 256MB */
-#define EBSZ_512 0x0000000A /* SDRAM External Bank Size = 512MB */
-#define EBCAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
-#define EBCAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */
-#define EBCAW_10 0x00000020 /* SDRAM external bank column address width = 10 bits */
-#define EBCAW_11 0x00000030 /* SDRAM external bank column address width = 11 bits */
-
-/* EBIU_SDSTAT Masks */
-#define SDCI 0x00000001 /* SDRAM controller is idle */
-#define SDSRA 0x00000002 /* SDRAM self-refresh is active */
-#define SDPUA 0x00000004 /* SDRAM power up active */
-#define SDRS 0x00000008 /* SDRAM is in reset state */
-#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */
-#define BGSTAT 0x00000020 /* Bus granted */
-
-
-/* ******************** TWO-WIRE INTERFACE (TWIx) MASKS ***********************/
-/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#ifdef _MISRA_RULES
-#define CLKLOW(x) ((x) & 0xFFu) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFFu)<<0x8) /* Periods Before New Clock Low */
-#else
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-#endif /* _MISRA_RULES */
-
-/* TWIx_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
-
-/* TWIx_SLAVE_CTRL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWIx_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWIx_MASTER_CTRL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
-
-/* TWIx_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWIx_INT_SRC and TWIx_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWIx_FIFO_CTL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWIx_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
#endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index bd9adb7183d..8a5beeece99 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -70,4 +70,9 @@
#define PORT_D GPIO_PD0
#define PORT_E GPIO_PE0
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/pll.h b/arch/blackfin/mach-bf538/include/mach/pll.h
index b30bbcd412a..94cca674d83 100644
--- a/arch/blackfin/mach-bf538/include/mach/pll.h
+++ b/arch/blackfin/mach-bf538/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 4c2ee678909..d11502ac562 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -156,7 +156,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -211,7 +211,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX,
#ifdef CONFIG_BFIN_UART1_CTSRTS
P_UART1_RTS, P_UART1_CTS,
@@ -258,7 +258,7 @@ static struct resource bfin_uart2_resources[] = {
},
};
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
P_UART2_TX, P_UART2_RX, 0
};
@@ -313,7 +313,7 @@ static struct resource bfin_uart3_resources[] = {
#endif
};
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
P_UART3_TX, P_UART3_RX,
#ifdef CONFIG_BFIN_UART3_CTSRTS
P_UART3_RTS, P_UART3_CTS,
@@ -504,6 +504,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -552,9 +553,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -586,9 +587,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
@@ -620,7 +621,7 @@ static struct resource bfin_sport2_uart_resources[] = {
},
};
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};
@@ -654,7 +655,7 @@ static struct resource bfin_sport3_uart_resources[] = {
},
};
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};
@@ -756,7 +757,7 @@ static struct platform_device bf54x_sdh_device = {
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 4f03fbc4c9b..ce5a2bb147d 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -261,7 +261,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
@@ -316,7 +316,7 @@ static struct resource bfin_uart1_resources[] = {
#endif
};
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX,
#ifdef CONFIG_BFIN_UART1_CTSRTS
P_UART1_RTS, P_UART1_CTS,
@@ -363,7 +363,7 @@ static struct resource bfin_uart2_resources[] = {
},
};
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
P_UART2_TX, P_UART2_RX, 0
};
@@ -418,7 +418,7 @@ static struct resource bfin_uart3_resources[] = {
#endif
};
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
P_UART3_TX, P_UART3_RX,
#ifdef CONFIG_BFIN_UART3_CTSRTS
P_UART3_RTS, P_UART3_CTS,
@@ -609,6 +609,7 @@ static struct musb_hdrc_config musb_config = {
* if it is the case.
*/
.gpio_vrsel_active = 1,
+ .clkin = 24, /* musb CLKIN in MHz */
};
static struct musb_hdrc_platform_data musb_plat = {
@@ -657,9 +658,9 @@ static struct resource bfin_sport0_uart_resources[] = {
},
};
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
@@ -691,9 +692,9 @@ static struct resource bfin_sport1_uart_resources[] = {
},
};
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+ P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
@@ -725,7 +726,7 @@ static struct resource bfin_sport2_uart_resources[] = {
},
};
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};
@@ -759,7 +760,7 @@ static struct resource bfin_sport3_uart_resources[] = {
},
};
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};
@@ -777,7 +778,7 @@ static struct platform_device bfin_sport3_uart_device = {
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 888b9cc0b82..69ead33cbf9 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..a77109f9972
--- /dev/null
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
@@ -0,0 +1,16 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 4
+
+#define BFIN_UART_BF54X_STYLE
+
+#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
index dd44aa75fe7..0d94edaaaa2 100644
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
@@ -4,72 +4,14 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER_SET))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-#define UART_GET_MSR(uart) bfin_read16(((uart)->port.membase + OFFSET_MSR))
-#define UART_GET_MCR(uart) bfin_read16(((uart)->port.membase + OFFSET_MCR))
-
-#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_SET_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER_SET),v)
-#define UART_CLEAR_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER_CLEAR),v)
-#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LSR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LSR),v)
-#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_CLEAR_LSR(uart) bfin_write16(((uart)->port.membase + OFFSET_LSR), -1)
-#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-#define UART_PUT_MCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_MCR),v)
-#define UART_CLEAR_SCTS(uart) bfin_write16(((uart)->port.membase + OFFSET_MSR),SCTS)
-
-#define UART_SET_DLAB(uart) /* MMRs not muxed on BF54x */
-#define UART_CLEAR_DLAB(uart) /* MMRs not muxed on BF54x */
-
-#define UART_GET_CTS(x) (UART_GET_MSR(x) & CTS)
-#define UART_DISABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS|MRTS))
-#define UART_ENABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
-#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
-
#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- int scts;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -148,3 +90,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index 5684030ccc2..72da721a77f 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -10,58 +10,40 @@
#include "bf548.h"
#include "anomaly.h"
+#include <asm/def_LPBlackfin.h>
#ifdef CONFIG_BF542
-#include "defBF542.h"
+# include "defBF542.h"
#endif
-
#ifdef CONFIG_BF544
-#include "defBF544.h"
+# include "defBF544.h"
#endif
-
#ifdef CONFIG_BF547
-#include "defBF547.h"
+# include "defBF547.h"
#endif
-
#ifdef CONFIG_BF548
-#include "defBF548.h"
+# include "defBF548.h"
#endif
-
#ifdef CONFIG_BF549
-#include "defBF549.h"
-#endif
-
-#if !defined(__ASSEMBLY__)
-#ifdef CONFIG_BF542
-#include "cdefBF542.h"
+# include "defBF549.h"
+#endif
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF542
+# include "cdefBF542.h"
+# endif
+# ifdef CONFIG_BF544
+# include "cdefBF544.h"
+# endif
+# ifdef CONFIG_BF547
+# include "cdefBF547.h"
+# endif
+# ifdef CONFIG_BF548
+# include "cdefBF548.h"
+# endif
+# ifdef CONFIG_BF549
+# include "cdefBF549.h"
+# endif
#endif
-#ifdef CONFIG_BF544
-#include "cdefBF544.h"
-#endif
-#ifdef CONFIG_BF547
-#include "cdefBF547.h"
-#endif
-#ifdef CONFIG_BF548
-#include "cdefBF548.h"
-#endif
-#ifdef CONFIG_BF549
-#include "cdefBF549.h"
-#endif
-
-#endif
-
-#define BFIN_UART_NR_PORTS 4
-
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_GCTL 0x08 /* Global Control Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_IER_SET 0x20 /* Set Interrupt Enable Register */
-#define OFFSET_IER_CLEAR 0x24 /* Clear Interrupt Enable Register */
-#define OFFSET_THR 0x28 /* Transmit Holding register */
-#define OFFSET_RBR 0x2C /* Receive Buffer register */
#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
index 42f4a946954..d09c19cd1b7 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF542_H
#define _CDEF_BF542_H
-/* include all Core registers and bit definitions */
-#include "defBF542.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
index 2207799575f..33ec8102ced 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF544_H
#define _CDEF_BF544_H
-/* include all Core registers and bit definitions */
-#include "defBF544.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index bc650e6ea48..bcb9726dea5 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF547_H
#define _CDEF_BF547_H
-/* include all Core registers and bit definitions */
-#include "defBF547.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
index 3523e08f796..bae67a65633 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF548_H
#define _CDEF_BF548_H
-/* include all Core registers and bit definitions */
-#include "defBF548.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
index 80201ed41f8..002136ad5a4 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF549_H
#define _CDEF_BF549_H
-/* include all Core registers and bit definitions */
-#include "defBF549.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index deaf5d6542d..50c89c8052f 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,10 +7,6 @@
#ifndef _CDEF_BF54X_H
#define _CDEF_BF54X_H
-#include <asm/blackfin.h>
-
-#include "defBF54x_base.h"
-
/* ************************************************************** */
/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x */
/* ************************************************************** */
@@ -2633,22 +2629,5 @@
 /* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 processor */
-/* legacy definitions */
-#define bfin_read_EBIU_AMCBCTL0 bfin_read_EBIU_AMBCTL0
-#define bfin_write_EBIU_AMCBCTL0 bfin_write_EBIU_AMBCTL0
-#define bfin_read_EBIU_AMCBCTL1 bfin_read_EBIU_AMBCTL1
-#define bfin_write_EBIU_AMCBCTL1 bfin_write_EBIU_AMBCTL1
-#define bfin_read_PINT0_IRQ bfin_read_PINT0_REQUEST
-#define bfin_write_PINT0_IRQ bfin_write_PINT0_REQUEST
-#define bfin_read_PINT1_IRQ bfin_read_PINT1_REQUEST
-#define bfin_write_PINT1_IRQ bfin_write_PINT1_REQUEST
-#define bfin_read_PINT2_IRQ bfin_read_PINT2_REQUEST
-#define bfin_write_PINT2_IRQ bfin_write_PINT2_REQUEST
-#define bfin_read_PINT3_IRQ bfin_read_PINT3_REQUEST
-#define bfin_write_PINT3_IRQ bfin_write_PINT3_REQUEST
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
#endif /* _CDEF_BF54X_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index abf5f750dd8..629bf216e2b 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,11 +7,6 @@
#ifndef _DEF_BF542_H
#define _DEF_BF542_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index e2771094de0..642468c1bcb 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,11 +7,6 @@
#ifndef _DEF_BF544_H
#define _DEF_BF544_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index be21ba5b3aa..2f3337cd311 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,11 +7,6 @@
#ifndef _DEF_BF547_H
#define _DEF_BF547_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index 3fb33b040ab..3c7f1b69349 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,11 +7,6 @@
#ifndef _DEF_BF548_H
#define _DEF_BF548_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index 5a04e6d4017..9a45cb6b30d 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -7,11 +7,6 @@
#ifndef _DEF_BF549_H
#define _DEF_BF549_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 78f91103f17..0867c2bedb4 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
@@ -1615,14 +1615,14 @@
#define CTYPE 0x40 /* DMA Channel Type */
#define PMAP 0xf000 /* Peripheral Mapped To This Channel */
-/* Bit masks for DMACx_TCPER */
+/* Bit masks for DMACx_TC_PER */
#define DCB_TRAFFIC_PERIOD 0xf /* DCB Traffic Control Period */
#define DEB_TRAFFIC_PERIOD 0xf0 /* DEB Traffic Control Period */
#define DAB_TRAFFIC_PERIOD 0x700 /* DAB Traffic Control Period */
#define MDMA_ROUND_ROBIN_PERIOD 0xf800 /* MDMA Round Robin Period */
-/* Bit masks for DMACx_TCCNT */
+/* Bit masks for DMACx_TC_CNT */
#define DCB_TRAFFIC_COUNT 0xf /* DCB Traffic Control Count */
#define DEB_TRAFFIC_COUNT 0xf0 /* DEB Traffic Control Count */
@@ -2172,68 +2172,6 @@
#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */
-/* Bit masks for UARTx_LCR */
-
-#if 0
-/* conflicts with legacy one in last section */
-#define WLS 0x3 /* Word Length Select */
-#endif
-#define STB 0x4 /* Stop Bits */
-#define PEN 0x8 /* Parity Enable */
-#define EPS 0x10 /* Even Parity Select */
-#define STP 0x20 /* Sticky Parity */
-#define SB 0x40 /* Set Break */
-
-/* Bit masks for UARTx_MCR */
-
-#define XOFF 0x1 /* Transmitter Off */
-#define MRTS 0x2 /* Manual Request To Send */
-#define RFIT 0x4 /* Receive FIFO IRQ Threshold */
-#define RFRT 0x8 /* Receive FIFO RTS Threshold */
-#define LOOP_ENA 0x10 /* Loopback Mode Enable */
-#define FCPOL 0x20 /* Flow Control Pin Polarity */
-#define ARTS 0x40 /* Automatic Request To Send */
-#define ACTS 0x80 /* Automatic Clear To Send */
-
-/* Bit masks for UARTx_LSR */
-
-#define DR 0x1 /* Data Ready */
-#define OE 0x2 /* Overrun Error */
-#define PE 0x4 /* Parity Error */
-#define FE 0x8 /* Framing Error */
-#define BI 0x10 /* Break Interrupt */
-#define THRE 0x20 /* THR Empty */
-#define TEMT 0x40 /* Transmitter Empty */
-#define TFI 0x80 /* Transmission Finished Indicator */
-
-/* Bit masks for UARTx_MSR */
-
-#define SCTS 0x1 /* Sticky CTS */
-#define CTS 0x10 /* Clear To Send */
-#define RFCS 0x20 /* Receive FIFO Count Status */
-
-/* Bit masks for UARTx_IER_SET & UARTx_IER_CLEAR */
-
-#define ERBFI 0x1 /* Enable Receive Buffer Full Interrupt */
-#define ETBEI 0x2 /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI 0x4 /* Enable Receive Status Interrupt */
-#define EDSSI 0x8 /* Enable Modem Status Interrupt */
-#define EDTPTI 0x10 /* Enable DMA Transmit PIRQ Interrupt */
-#define ETFI 0x20 /* Enable Transmission Finished Interrupt */
-#define ERFCI 0x40 /* Enable Receive FIFO Count Interrupt */
-
-/* Bit masks for UARTx_GCTL */
-
-#define UCEN 0x1 /* UART Enable */
-#define IREN 0x2 /* IrDA Mode Enable */
-#define TPOLC 0x4 /* IrDA TX Polarity Change */
-#define RPOLC 0x8 /* IrDA RX Polarity Change */
-#define FPE 0x10 /* Force Parity Error */
-#define FFE 0x20 /* Force Framing Error */
-#define EDBO 0x40 /* Enable Divide-by-One */
-#define EGLSI 0x80 /* Enable Global LS Interrupt */
-
-
/* ******************************************* */
/* MULTI BIT MACRO ENUMERATIONS */
/* ******************************************* */
@@ -2251,13 +2189,6 @@
#define WDTH_CAP 0x0002
#define EXT_CLK 0x0003
-/* UARTx_LCR bit field options */
-
-#define WLS_5 0x0000 /* 5 data bits */
-#define WLS_6 0x0001 /* 6 data bits */
-#define WLS_7 0x0002 /* 7 data bits */
-#define WLS_8 0x0003 /* 8 data bits */
-
/* PINTx Register Bit Definitions */
#define PIQ0 0x00000001
@@ -2300,240 +2231,6 @@
#define PIQ30 0x40000000
#define PIQ31 0x80000000
-/* PORT A Bit Definitions for the registers
-PORTA, PORTA_SET, PORTA_CLEAR,
-PORTA_DIR_SET, PORTA_DIR_CLEAR, PORTA_INEN,
-PORTA_FER registers
-*/
-
-#define PA0 0x0001
-#define PA1 0x0002
-#define PA2 0x0004
-#define PA3 0x0008
-#define PA4 0x0010
-#define PA5 0x0020
-#define PA6 0x0040
-#define PA7 0x0080
-#define PA8 0x0100
-#define PA9 0x0200
-#define PA10 0x0400
-#define PA11 0x0800
-#define PA12 0x1000
-#define PA13 0x2000
-#define PA14 0x4000
-#define PA15 0x8000
-
-/* PORT B Bit Definitions for the registers
-PORTB, PORTB_SET, PORTB_CLEAR,
-PORTB_DIR_SET, PORTB_DIR_CLEAR, PORTB_INEN,
-PORTB_FER registers
-*/
-
-#define PB0 0x0001
-#define PB1 0x0002
-#define PB2 0x0004
-#define PB3 0x0008
-#define PB4 0x0010
-#define PB5 0x0020
-#define PB6 0x0040
-#define PB7 0x0080
-#define PB8 0x0100
-#define PB9 0x0200
-#define PB10 0x0400
-#define PB11 0x0800
-#define PB12 0x1000
-#define PB13 0x2000
-#define PB14 0x4000
-
-
-/* PORT C Bit Definitions for the registers
-PORTC, PORTC_SET, PORTC_CLEAR,
-PORTC_DIR_SET, PORTC_DIR_CLEAR, PORTC_INEN,
-PORTC_FER registers
-*/
-
-
-#define PC0 0x0001
-#define PC1 0x0002
-#define PC2 0x0004
-#define PC3 0x0008
-#define PC4 0x0010
-#define PC5 0x0020
-#define PC6 0x0040
-#define PC7 0x0080
-#define PC8 0x0100
-#define PC9 0x0200
-#define PC10 0x0400
-#define PC11 0x0800
-#define PC12 0x1000
-#define PC13 0x2000
-
-
-/* PORT D Bit Definitions for the registers
-PORTD, PORTD_SET, PORTD_CLEAR,
-PORTD_DIR_SET, PORTD_DIR_CLEAR, PORTD_INEN,
-PORTD_FER registers
-*/
-
-#define PD0 0x0001
-#define PD1 0x0002
-#define PD2 0x0004
-#define PD3 0x0008
-#define PD4 0x0010
-#define PD5 0x0020
-#define PD6 0x0040
-#define PD7 0x0080
-#define PD8 0x0100
-#define PD9 0x0200
-#define PD10 0x0400
-#define PD11 0x0800
-#define PD12 0x1000
-#define PD13 0x2000
-#define PD14 0x4000
-#define PD15 0x8000
-
-/* PORT E Bit Definitions for the registers
-PORTE, PORTE_SET, PORTE_CLEAR,
-PORTE_DIR_SET, PORTE_DIR_CLEAR, PORTE_INEN,
-PORTE_FER registers
-*/
-
-
-#define PE0 0x0001
-#define PE1 0x0002
-#define PE2 0x0004
-#define PE3 0x0008
-#define PE4 0x0010
-#define PE5 0x0020
-#define PE6 0x0040
-#define PE7 0x0080
-#define PE8 0x0100
-#define PE9 0x0200
-#define PE10 0x0400
-#define PE11 0x0800
-#define PE12 0x1000
-#define PE13 0x2000
-#define PE14 0x4000
-#define PE15 0x8000
-
-/* PORT F Bit Definitions for the registers
-PORTF, PORTF_SET, PORTF_CLEAR,
-PORTF_DIR_SET, PORTF_DIR_CLEAR, PORTF_INEN,
-PORTF_FER registers
-*/
-
-
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* PORT G Bit Definitions for the registers
-PORTG, PORTG_SET, PORTG_CLEAR,
-PORTG_DIR_SET, PORTG_DIR_CLEAR, PORTG_INEN,
-PORTG_FER registers
-*/
-
-
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* PORT H Bit Definitions for the registers
-PORTH, PORTH_SET, PORTH_CLEAR,
-PORTH_DIR_SET, PORTH_DIR_CLEAR, PORTH_INEN,
-PORTH_FER registers
-*/
-
-
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
-#define PH8 0x0100
-#define PH9 0x0200
-#define PH10 0x0400
-#define PH11 0x0800
-#define PH12 0x1000
-#define PH13 0x2000
-
-
-/* PORT I Bit Definitions for the registers
-PORTI, PORTI_SET, PORTI_CLEAR,
-PORTI_DIR_SET, PORTI_DIR_CLEAR, PORTI_INEN,
-PORTI_FER registers
-*/
-
-
-#define PI0 0x0001
-#define PI1 0x0002
-#define PI2 0x0004
-#define PI3 0x0008
-#define PI4 0x0010
-#define PI5 0x0020
-#define PI6 0x0040
-#define PI7 0x0080
-#define PI8 0x0100
-#define PI9 0x0200
-#define PI10 0x0400
-#define PI11 0x0800
-#define PI12 0x1000
-#define PI13 0x2000
-#define PI14 0x4000
-#define PI15 0x8000
-
-/* PORT J Bit Definitions for the registers
-PORTJ, PORTJ_SET, PORTJ_CLEAR,
-PORTJ_DIR_SET, PORTJ_DIR_CLEAR, PORTJ_INEN,
-PORTJ_FER registers
-*/
-
-
-#define PJ0 0x0001
-#define PJ1 0x0002
-#define PJ2 0x0004
-#define PJ3 0x0008
-#define PJ4 0x0010
-#define PJ5 0x0020
-#define PJ6 0x0040
-#define PJ7 0x0080
-#define PJ8 0x0100
-#define PJ9 0x0200
-#define PJ10 0x0400
-#define PJ11 0x0800
-#define PJ12 0x1000
-#define PJ13 0x2000
-
-
/* Port Muxing Bit Fields for PORTx_MUX Registers */
#define MUX0 0x00000003
@@ -2703,16 +2400,4 @@ PORTJ_FER registers
#define B3MAP_PIH 0x06000000 /* Map Port I High to Byte 3 */
#define B3MAP_PJH 0x07000000 /* Map Port J High to Byte 3 */
-
-/* for legacy compatibility */
-
-#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
-#define W1LMAX_MAX W1LMAX_MIN
-#define EBIU_AMCBCTL0 EBIU_AMBCTL0
-#define EBIU_AMCBCTL1 EBIU_AMBCTL1
-#define PINT0_IRQ PINT0_REQUEST
-#define PINT1_IRQ PINT1_REQUEST
-#define PINT2_IRQ PINT2_REQUEST
-#define PINT3_IRQ PINT3_REQUEST
-
#endif /* _DEF_BF54X_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 28037e33196..7db433514e3 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -200,4 +200,15 @@ struct gpio_port_s {
#endif
+#include <mach-common/ports-a.h>
+#include <mach-common/ports-b.h>
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+#include <mach-common/ports-i.h>
+#include <mach-common/ports-j.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 1f99b51a3d5..99fd1b2c53d 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -474,4 +474,26 @@ Events (highest priority) EMU 0
#define IRQ_PINT2_POS 24
#define IRQ_PINT3_POS 28
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * Blackfin PINT register block layout
+ */
+struct bfin_pint_regs {
+ u32 mask_set;
+ u32 mask_clear;
+ u32 irq;
+ u32 assign;
+ u32 edge_set;
+ u32 edge_clear;
+ u32 invert_set;
+ u32 invert_clear;
+ u32 pinstate;
+ u32 latch;
+ u32 __pad0[2];
+};
+
+#endif
+
#endif /* _BF548_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/pll.h b/arch/blackfin/mach-bf548/include/mach/pll.h
index 7865a090d33..94cca674d83 100644
--- a/arch/blackfin/mach-bf548/include/mach/pll.h
+++ b/arch/blackfin/mach-bf548/include/mach/pll.h
@@ -1,69 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1, iwr2;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- iwr2 = bfin_read32(SIC_IWR2);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
- bfin_write32(SIC_IWR2, 0);
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- bfin_write32(SIC_IWR2, iwr2);
- hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
- unsigned long flags, iwr0, iwr1, iwr2;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SIC_IWR0);
- iwr1 = bfin_read32(SIC_IWR1);
- iwr2 = bfin_read32(SIC_IWR2);
- /* Only allow PLL Wakeup */
- bfin_write32(SIC_IWR0, IWR_ENABLE(0));
- bfin_write32(SIC_IWR1, 0);
- bfin_write32(SIC_IWR2, 0);
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SIC_IWR0, iwr0);
- bfin_write32(SIC_IWR1, iwr1);
- bfin_write32(SIC_IWR2, iwr2);
- hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index f99f174b129..52d6f73fcce 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -49,6 +49,7 @@ ENTRY(_get_core_lock)
jump .Lretry_corelock
.Ldone_corelock:
p0 = r1;
+ /* flush the core-internal write buffer before invalidating the dcache */
CSYNC(r2);
flushinv[p0];
SSYNC(r2);
@@ -685,6 +686,8 @@ ENTRY(___raw_atomic_test_asm)
r1 = -L1_CACHE_BYTES;
r1 = r0 & r1;
p0 = r1;
+ /* flush the core-internal write buffer before invalidating the dcache */
+ CSYNC(r2);
flushinv[p0];
SSYNC(r2);
r0 = [p1];
@@ -907,6 +910,8 @@ ENTRY(___raw_uncached_fetch_asm)
r1 = -L1_CACHE_BYTES;
r1 = r0 & r1;
p0 = r1;
+ /* flush the core-internal write buffer before invalidating the dcache */
+ CSYNC(r2);
flushinv[p0];
SSYNC(r2);
r0 = [p1];
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 0b1c20f14fe..3926cd909b6 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -224,7 +224,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 087b6b05cc7..3b67929d4c0 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -334,7 +334,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index ab7a487975f..f667e770419 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -190,7 +190,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index d3017e53686..bb056e60f6e 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -72,7 +72,7 @@ static struct resource bfin_uart0_resources[] = {
},
};
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
diff --git a/arch/blackfin/mach-bf561/dma.c b/arch/blackfin/mach-bf561/dma.c
index c938c3c7355..8ffdd6b4a24 100644
--- a/arch/blackfin/mach-bf561/dma.c
+++ b/arch/blackfin/mach-bf561/dma.c
@@ -11,7 +11,7 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA1_0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_1_NEXT_DESC_PTR,
(struct dma_register *) DMA1_2_NEXT_DESC_PTR,
@@ -36,14 +36,14 @@ struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA2_9_NEXT_DESC_PTR,
(struct dma_register *) DMA2_10_NEXT_DESC_PTR,
(struct dma_register *) DMA2_11_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA2_D0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA2_S0_NEXT_DESC_PTR,
- (struct dma_register *) MDMA2_D1_NEXT_DESC_PTR,
- (struct dma_register *) MDMA2_S1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+ (struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
(struct dma_register *) IMDMA_D0_NEXT_DESC_PTR,
(struct dma_register *) IMDMA_S0_NEXT_DESC_PTR,
(struct dma_register *) IMDMA_D1_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c
index c95169b612d..4cd3b28cd04 100644
--- a/arch/blackfin/mach-bf561/hotplug.c
+++ b/arch/blackfin/mach-bf561/hotplug.c
@@ -6,7 +6,9 @@
*/
#include <asm/blackfin.h>
+#include <asm/irq.h>
#include <asm/smp.h>
+
#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
int hotplug_coreb;
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 4c108c99cb6..6a3499b0209 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -181,7 +181,11 @@
/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
#define ANOMALY_05000254 (__SILICON_REVISION__ > 3)
/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
-#define ANOMALY_05000257 (__SILICON_REVISION__ < 5)
+/* Temporary workaround for kgdb bug 6333 in the SMP kernel: core B appears to hang in
+ * an exception unless anomaly 05000257 is handled on bf561 v0.5. This workaround may
+ * change once the behavior and the root cause are confirmed with the hardware team.
+ */
+#define ANOMALY_05000257 (__SILICON_REVISION__ < 5 || (__SILICON_REVISION__ == 5 && CONFIG_SMP))
/* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */
#define ANOMALY_05000258 (__SILICON_REVISION__ < 5)
/* ICPLB_STATUS MMR Register May Be Corrupted */
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
new file mode 100644
index 00000000000..08072c86d5d
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 1
+
+#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
index e33e158bc16..3a6947456cf 100644
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/serial.h>
#include <asm/dma.h>
#include <asm/portmux.h>
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
#ifdef CONFIG_BFIN_UART0_CTSRTS
# define CONFIG_SERIAL_BFIN_CTSRTS
# ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
# endif
#endif
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-struct bfin_serial_port {
- struct uart_port port;
- unsigned int old_status;
- int status_irq;
- unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- int tx_done;
- int tx_count;
- struct circ_buf rx_dma_buf;
- struct timer_list rx_dma_timer;
- int rx_dma_nrows;
- unsigned int tx_dma_channel;
- unsigned int rx_dma_channel;
- struct work_struct tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
- unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- struct timer_list cts_timer;
- int cts_pin;
- int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
- unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
- uart->lsr |= (lsr & (BI|FE|PE|OE));
- return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
- uart->lsr = 0;
- bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
struct bfin_serial_res {
unsigned long uart_base_addr;
int uart_irq;
@@ -120,3 +48,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
};
#define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 6c7dc58c018..dc470534c08 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -10,11 +10,14 @@
#define BF561_FAMILY
#include "bf561.h"
-#include "defBF561.h"
#include "anomaly.h"
-#if !defined(__ASSEMBLY__)
-#include "cdefBF561.h"
+#include <asm/def_LPBlackfin.h>
+#include "defBF561.h"
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF561.h"
#endif
#define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D()
@@ -35,19 +38,4 @@
#define bfin_read_SICB_ISR(x) bfin_read32(__SIC_MUX(SICB_ISR0, x))
#define bfin_write_SICB_ISR(x, val) bfin_write32(__SIC_MUX(SICB_ISR0, x), val)
-#define BFIN_UART_NR_PORTS 1
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
-
#endif /* _MACH_BLACKFIN_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index 2bab9915249..75333159720 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,14 +7,6 @@
#ifndef _CDEF_BF561_H
#define _CDEF_BF561_H
-#include <asm/blackfin.h>
-
-/* include all Core registers and bit definitions */
-#include "defBF561.h"
-
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
/*********************************************************************************** */
/* System MMR Register Map */
/*********************************************************************************** */
@@ -523,14 +515,14 @@
#define bfin_read_PPI1_FRAME() bfin_read16(PPI1_FRAME)
#define bfin_write_PPI1_FRAME(val) bfin_write16(PPI1_FRAME,val)
/*DMA traffic control registers */
-#define bfin_read_DMA1_TC_PER() bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val) bfin_write16(DMA1_TC_PER,val)
-#define bfin_read_DMA1_TC_CNT() bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val) bfin_write16(DMA1_TC_CNT,val)
-#define bfin_read_DMA2_TC_PER() bfin_read16(DMA2_TC_PER)
-#define bfin_write_DMA2_TC_PER(val) bfin_write16(DMA2_TC_PER,val)
-#define bfin_read_DMA2_TC_CNT() bfin_read16(DMA2_TC_CNT)
-#define bfin_write_DMA2_TC_CNT(val) bfin_write16(DMA2_TC_CNT,val)
+#define bfin_read_DMAC0_TC_PER() bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val) bfin_write16(DMAC0_TC_PER,val)
+#define bfin_read_DMAC0_TC_CNT() bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val) bfin_write16(DMAC0_TC_CNT,val)
+#define bfin_read_DMAC1_TC_PER() bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val) bfin_write16(DMAC1_TC_PER,val)
+#define bfin_read_DMAC1_TC_CNT() bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val) bfin_write16(DMAC1_TC_CNT,val)
/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
#define bfin_read_DMA1_0_CONFIG() bfin_read16(DMA1_0_CONFIG)
#define bfin_write_DMA1_0_CONFIG(val) bfin_write16(DMA1_0_CONFIG,val)
@@ -845,110 +837,110 @@
#define bfin_read_DMA1_11_PERIPHERAL_MAP() bfin_read16(DMA1_11_PERIPHERAL_MAP)
#define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val)
/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define bfin_read_MDMA1_D0_CONFIG() bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG,val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_read32(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_START_ADDR() bfin_read32(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_write32(MDMA1_D0_START_ADDR,val)
-#define bfin_read_MDMA1_D0_X_COUNT() bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT,val)
-#define bfin_read_MDMA1_D0_Y_COUNT() bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_X_MODIFY() bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY,val)
-#define bfin_read_MDMA1_D0_Y_MODIFY() bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY,val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_read32(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_write32(MDMA1_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_read32(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_write32(MDMA1_D0_CURR_ADDR,val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S0_CONFIG() bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG,val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_read32(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_START_ADDR() bfin_read32(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_write32(MDMA1_S0_START_ADDR,val)
-#define bfin_read_MDMA1_S0_X_COUNT() bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT,val)
-#define bfin_read_MDMA1_S0_Y_COUNT() bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_X_MODIFY() bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY,val)
-#define bfin_read_MDMA1_S0_Y_MODIFY() bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY,val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_read32(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_write32(MDMA1_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_read32(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_write32(MDMA1_S0_CURR_ADDR,val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_D1_CONFIG() bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG,val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_read32(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_START_ADDR() bfin_read32(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_write32(MDMA1_D1_START_ADDR,val)
-#define bfin_read_MDMA1_D1_X_COUNT() bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT,val)
-#define bfin_read_MDMA1_D1_Y_COUNT() bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_X_MODIFY() bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY,val)
-#define bfin_read_MDMA1_D1_Y_MODIFY() bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY,val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_read32(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_write32(MDMA1_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_read32(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_write32(MDMA1_D1_CURR_ADDR,val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S1_CONFIG() bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG,val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_read32(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_START_ADDR() bfin_read32(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_write32(MDMA1_S1_START_ADDR,val)
-#define bfin_read_MDMA1_S1_X_COUNT() bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT,val)
-#define bfin_read_MDMA1_S1_Y_COUNT() bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_X_MODIFY() bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY,val)
-#define bfin_read_MDMA1_S1_Y_MODIFY() bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY,val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_read32(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_write32(MDMA1_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_read32(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_write32(MDMA1_S1_CURR_ADDR,val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D2_CONFIG() bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG,val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_read32(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_write32(MDMA_D2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D2_START_ADDR() bfin_read32(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val) bfin_write32(MDMA_D2_START_ADDR,val)
+#define bfin_read_MDMA_D2_X_COUNT() bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT,val)
+#define bfin_read_MDMA_D2_Y_COUNT() bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT,val)
+#define bfin_read_MDMA_D2_X_MODIFY() bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY,val)
+#define bfin_read_MDMA_D2_Y_MODIFY() bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY,val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_read32(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_write32(MDMA_D2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D2_CURR_ADDR() bfin_read32(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_write32(MDMA_D2_CURR_ADDR,val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS,val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S2_CONFIG() bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG,val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_read32(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_write32(MDMA_S2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S2_START_ADDR() bfin_read32(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val) bfin_write32(MDMA_S2_START_ADDR,val)
+#define bfin_read_MDMA_S2_X_COUNT() bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT,val)
+#define bfin_read_MDMA_S2_Y_COUNT() bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT,val)
+#define bfin_read_MDMA_S2_X_MODIFY() bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY,val)
+#define bfin_read_MDMA_S2_Y_MODIFY() bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY,val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_read32(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_write32(MDMA_S2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S2_CURR_ADDR() bfin_read32(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_write32(MDMA_S2_CURR_ADDR,val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS,val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D3_CONFIG() bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG,val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_read32(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_write32(MDMA_D3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D3_START_ADDR() bfin_read32(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val) bfin_write32(MDMA_D3_START_ADDR,val)
+#define bfin_read_MDMA_D3_X_COUNT() bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT,val)
+#define bfin_read_MDMA_D3_Y_COUNT() bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT,val)
+#define bfin_read_MDMA_D3_X_MODIFY() bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY,val)
+#define bfin_read_MDMA_D3_Y_MODIFY() bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY,val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_read32(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_write32(MDMA_D3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D3_CURR_ADDR() bfin_read32(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_write32(MDMA_D3_CURR_ADDR,val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS,val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S3_CONFIG() bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG,val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_read32(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_write32(MDMA_S3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S3_START_ADDR() bfin_read32(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val) bfin_write32(MDMA_S3_START_ADDR,val)
+#define bfin_read_MDMA_S3_X_COUNT() bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT,val)
+#define bfin_read_MDMA_S3_Y_COUNT() bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT,val)
+#define bfin_read_MDMA_S3_X_MODIFY() bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY,val)
+#define bfin_read_MDMA_S3_Y_MODIFY() bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY,val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_read32(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_write32(MDMA_S3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S3_CURR_ADDR() bfin_read32(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_write32(MDMA_S3_CURR_ADDR,val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS,val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP,val)
/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
#define bfin_read_DMA2_0_CONFIG() bfin_read16(DMA2_0_CONFIG)
#define bfin_write_DMA2_0_CONFIG(val) bfin_write16(DMA2_0_CONFIG,val)
@@ -1263,110 +1255,110 @@
#define bfin_read_DMA2_11_PERIPHERAL_MAP() bfin_read16(DMA2_11_PERIPHERAL_MAP)
#define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val)
/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define bfin_read_MDMA2_D0_CONFIG() bfin_read16(MDMA2_D0_CONFIG)
-#define bfin_write_MDMA2_D0_CONFIG(val) bfin_write16(MDMA2_D0_CONFIG,val)
-#define bfin_read_MDMA2_D0_NEXT_DESC_PTR() bfin_read32(MDMA2_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_START_ADDR() bfin_read32(MDMA2_D0_START_ADDR)
-#define bfin_write_MDMA2_D0_START_ADDR(val) bfin_write32(MDMA2_D0_START_ADDR,val)
-#define bfin_read_MDMA2_D0_X_COUNT() bfin_read16(MDMA2_D0_X_COUNT)
-#define bfin_write_MDMA2_D0_X_COUNT(val) bfin_write16(MDMA2_D0_X_COUNT,val)
-#define bfin_read_MDMA2_D0_Y_COUNT() bfin_read16(MDMA2_D0_Y_COUNT)
-#define bfin_write_MDMA2_D0_Y_COUNT(val) bfin_write16(MDMA2_D0_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_X_MODIFY() bfin_read16(MDMA2_D0_X_MODIFY)
-#define bfin_write_MDMA2_D0_X_MODIFY(val) bfin_write16(MDMA2_D0_X_MODIFY,val)
-#define bfin_read_MDMA2_D0_Y_MODIFY() bfin_read16(MDMA2_D0_Y_MODIFY)
-#define bfin_write_MDMA2_D0_Y_MODIFY(val) bfin_write16(MDMA2_D0_Y_MODIFY,val)
-#define bfin_read_MDMA2_D0_CURR_DESC_PTR() bfin_read32(MDMA2_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D0_CURR_DESC_PTR(val) bfin_write32(MDMA2_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_CURR_ADDR() bfin_read32(MDMA2_D0_CURR_ADDR)
-#define bfin_write_MDMA2_D0_CURR_ADDR(val) bfin_write32(MDMA2_D0_CURR_ADDR,val)
-#define bfin_read_MDMA2_D0_CURR_X_COUNT() bfin_read16(MDMA2_D0_CURR_X_COUNT)
-#define bfin_write_MDMA2_D0_CURR_X_COUNT(val) bfin_write16(MDMA2_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D0_CURR_Y_COUNT() bfin_read16(MDMA2_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D0_CURR_Y_COUNT(val) bfin_write16(MDMA2_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_IRQ_STATUS() bfin_read16(MDMA2_D0_IRQ_STATUS)
-#define bfin_write_MDMA2_D0_IRQ_STATUS(val) bfin_write16(MDMA2_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D0_PERIPHERAL_MAP() bfin_read16(MDMA2_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S0_CONFIG() bfin_read16(MDMA2_S0_CONFIG)
-#define bfin_write_MDMA2_S0_CONFIG(val) bfin_write16(MDMA2_S0_CONFIG,val)
-#define bfin_read_MDMA2_S0_NEXT_DESC_PTR() bfin_read32(MDMA2_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_START_ADDR() bfin_read32(MDMA2_S0_START_ADDR)
-#define bfin_write_MDMA2_S0_START_ADDR(val) bfin_write32(MDMA2_S0_START_ADDR,val)
-#define bfin_read_MDMA2_S0_X_COUNT() bfin_read16(MDMA2_S0_X_COUNT)
-#define bfin_write_MDMA2_S0_X_COUNT(val) bfin_write16(MDMA2_S0_X_COUNT,val)
-#define bfin_read_MDMA2_S0_Y_COUNT() bfin_read16(MDMA2_S0_Y_COUNT)
-#define bfin_write_MDMA2_S0_Y_COUNT(val) bfin_write16(MDMA2_S0_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_X_MODIFY() bfin_read16(MDMA2_S0_X_MODIFY)
-#define bfin_write_MDMA2_S0_X_MODIFY(val) bfin_write16(MDMA2_S0_X_MODIFY,val)
-#define bfin_read_MDMA2_S0_Y_MODIFY() bfin_read16(MDMA2_S0_Y_MODIFY)
-#define bfin_write_MDMA2_S0_Y_MODIFY(val) bfin_write16(MDMA2_S0_Y_MODIFY,val)
-#define bfin_read_MDMA2_S0_CURR_DESC_PTR() bfin_read32(MDMA2_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S0_CURR_DESC_PTR(val) bfin_write32(MDMA2_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_CURR_ADDR() bfin_read32(MDMA2_S0_CURR_ADDR)
-#define bfin_write_MDMA2_S0_CURR_ADDR(val) bfin_write32(MDMA2_S0_CURR_ADDR,val)
-#define bfin_read_MDMA2_S0_CURR_X_COUNT() bfin_read16(MDMA2_S0_CURR_X_COUNT)
-#define bfin_write_MDMA2_S0_CURR_X_COUNT(val) bfin_write16(MDMA2_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S0_CURR_Y_COUNT() bfin_read16(MDMA2_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S0_CURR_Y_COUNT(val) bfin_write16(MDMA2_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_IRQ_STATUS() bfin_read16(MDMA2_S0_IRQ_STATUS)
-#define bfin_write_MDMA2_S0_IRQ_STATUS(val) bfin_write16(MDMA2_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S0_PERIPHERAL_MAP() bfin_read16(MDMA2_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_D1_CONFIG() bfin_read16(MDMA2_D1_CONFIG)
-#define bfin_write_MDMA2_D1_CONFIG(val) bfin_write16(MDMA2_D1_CONFIG,val)
-#define bfin_read_MDMA2_D1_NEXT_DESC_PTR() bfin_read32(MDMA2_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_START_ADDR() bfin_read32(MDMA2_D1_START_ADDR)
-#define bfin_write_MDMA2_D1_START_ADDR(val) bfin_write32(MDMA2_D1_START_ADDR,val)
-#define bfin_read_MDMA2_D1_X_COUNT() bfin_read16(MDMA2_D1_X_COUNT)
-#define bfin_write_MDMA2_D1_X_COUNT(val) bfin_write16(MDMA2_D1_X_COUNT,val)
-#define bfin_read_MDMA2_D1_Y_COUNT() bfin_read16(MDMA2_D1_Y_COUNT)
-#define bfin_write_MDMA2_D1_Y_COUNT(val) bfin_write16(MDMA2_D1_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_X_MODIFY() bfin_read16(MDMA2_D1_X_MODIFY)
-#define bfin_write_MDMA2_D1_X_MODIFY(val) bfin_write16(MDMA2_D1_X_MODIFY,val)
-#define bfin_read_MDMA2_D1_Y_MODIFY() bfin_read16(MDMA2_D1_Y_MODIFY)
-#define bfin_write_MDMA2_D1_Y_MODIFY(val) bfin_write16(MDMA2_D1_Y_MODIFY,val)
-#define bfin_read_MDMA2_D1_CURR_DESC_PTR() bfin_read32(MDMA2_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D1_CURR_DESC_PTR(val) bfin_write32(MDMA2_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_CURR_ADDR() bfin_read32(MDMA2_D1_CURR_ADDR)
-#define bfin_write_MDMA2_D1_CURR_ADDR(val) bfin_write32(MDMA2_D1_CURR_ADDR,val)
-#define bfin_read_MDMA2_D1_CURR_X_COUNT() bfin_read16(MDMA2_D1_CURR_X_COUNT)
-#define bfin_write_MDMA2_D1_CURR_X_COUNT(val) bfin_write16(MDMA2_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D1_CURR_Y_COUNT() bfin_read16(MDMA2_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D1_CURR_Y_COUNT(val) bfin_write16(MDMA2_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_IRQ_STATUS() bfin_read16(MDMA2_D1_IRQ_STATUS)
-#define bfin_write_MDMA2_D1_IRQ_STATUS(val) bfin_write16(MDMA2_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D1_PERIPHERAL_MAP() bfin_read16(MDMA2_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S1_CONFIG() bfin_read16(MDMA2_S1_CONFIG)
-#define bfin_write_MDMA2_S1_CONFIG(val) bfin_write16(MDMA2_S1_CONFIG,val)
-#define bfin_read_MDMA2_S1_NEXT_DESC_PTR() bfin_read32(MDMA2_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_START_ADDR() bfin_read32(MDMA2_S1_START_ADDR)
-#define bfin_write_MDMA2_S1_START_ADDR(val) bfin_write32(MDMA2_S1_START_ADDR,val)
-#define bfin_read_MDMA2_S1_X_COUNT() bfin_read16(MDMA2_S1_X_COUNT)
-#define bfin_write_MDMA2_S1_X_COUNT(val) bfin_write16(MDMA2_S1_X_COUNT,val)
-#define bfin_read_MDMA2_S1_Y_COUNT() bfin_read16(MDMA2_S1_Y_COUNT)
-#define bfin_write_MDMA2_S1_Y_COUNT(val) bfin_write16(MDMA2_S1_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_X_MODIFY() bfin_read16(MDMA2_S1_X_MODIFY)
-#define bfin_write_MDMA2_S1_X_MODIFY(val) bfin_write16(MDMA2_S1_X_MODIFY,val)
-#define bfin_read_MDMA2_S1_Y_MODIFY() bfin_read16(MDMA2_S1_Y_MODIFY)
-#define bfin_write_MDMA2_S1_Y_MODIFY(val) bfin_write16(MDMA2_S1_Y_MODIFY,val)
-#define bfin_read_MDMA2_S1_CURR_DESC_PTR() bfin_read32(MDMA2_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S1_CURR_DESC_PTR(val) bfin_write32(MDMA2_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_CURR_ADDR() bfin_read32(MDMA2_S1_CURR_ADDR)
-#define bfin_write_MDMA2_S1_CURR_ADDR(val) bfin_write32(MDMA2_S1_CURR_ADDR,val)
-#define bfin_read_MDMA2_S1_CURR_X_COUNT() bfin_read16(MDMA2_S1_CURR_X_COUNT)
-#define bfin_write_MDMA2_S1_CURR_X_COUNT(val) bfin_write16(MDMA2_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S1_CURR_Y_COUNT() bfin_read16(MDMA2_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S1_CURR_Y_COUNT(val) bfin_write16(MDMA2_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_IRQ_STATUS() bfin_read16(MDMA2_S1_IRQ_STATUS)
-#define bfin_write_MDMA2_S1_IRQ_STATUS(val) bfin_write16(MDMA2_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S1_PERIPHERAL_MAP() bfin_read16(MDMA2_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG,val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR,val)
+#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT,val)
+#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT,val)
+#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY,val)
+#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY,val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR,val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS,val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG,val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR,val)
+#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT,val)
+#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT,val)
+#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY,val)
+#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY,val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR,val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS,val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG,val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR,val)
+#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT,val)
+#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT,val)
+#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY,val)
+#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY,val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR,val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS,val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG,val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR,val)
+#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT,val)
+#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT,val)
+#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY,val)
+#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY,val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR,val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS,val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val)
/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
#define bfin_read_IMDMA_D0_CONFIG() bfin_read16(IMDMA_D0_CONFIG)
#define bfin_write_IMDMA_D0_CONFIG(val) bfin_write16(IMDMA_D0_CONFIG,val)
@@ -1465,65 +1457,4 @@
#define bfin_read_IMDMA_S1_IRQ_STATUS() bfin_read16(IMDMA_S1_IRQ_STATUS)
#define bfin_write_IMDMA_S1_IRQ_STATUS(val) bfin_write16(IMDMA_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA1_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA1_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA1_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA1_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA1_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA1_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA1_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA1_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA1_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA1_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA1_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA1_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA1_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA1_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA1_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA1_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA1_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA1_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA1_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA1_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA1_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA1_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA1_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA1_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA1_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA1_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA1_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA1_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA1_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA1_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA1_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA1_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA1_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA1_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA1_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA1_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA1_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA1_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA1_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA1_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA1_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA1_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA1_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA1_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA1_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA1_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA1_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA1_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA1_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA1_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA1_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA1_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA1_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA1_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA1_D1_START_ADDR(val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
#endif /* _CDEF_BF561_H */
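
The hunks above rename the BF561 memory-DMA accessor macros so that the generic MDMA_* names refer to MMR addresses directly instead of aliasing the old MDMA1_*/MDMA2_* names: the DMA controller 0 streams at 0xFFC00F00-0xFFC00FFF become MDMA_D0/S0 and MDMA_D1/S1, the controller 1 streams at 0xFFC01F00-0xFFC01FFF become MDMA_D2/S2 and MDMA_D3/S3, and the traffic-control registers follow the same controller-oriented scheme (DMA1/DMA2_TC_* -> DMAC1/DMAC0_TC_*). As a rough, hedged sketch only (not part of this commit), a descriptor-less 8-bit memory-to-memory copy on stream 0 could be programmed through the renamed accessors as below; it assumes the usual Blackfin DMA bit definitions (DMAEN, WNR, WDSIZE_8, DMA_DONE) and the SSYNC() core-sync macro are in scope, and it polls rather than taking the MDMA interrupt:

static void mdma0_memcpy8(void *dst, const void *src, unsigned short count)
{
	/* Source side: linear 8-bit reads, increment by one byte. */
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
	bfin_write_MDMA_S0_X_COUNT(count);
	bfin_write_MDMA_S0_X_MODIFY(1);
	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);

	/* Destination side: WNR marks this half of the stream as a write. */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
	bfin_write_MDMA_D0_X_COUNT(count);
	bfin_write_MDMA_D0_X_MODIFY(1);
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_8);
	SSYNC();

	/* Poll the destination interrupt/status register for completion. */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);	/* write-1-to-clear */
}

The same sequence would drive the second pair of streams by substituting the MDMA_D2/S2 (or D3/S3) accessors; because the macros now expand straight to bfin_read16/bfin_write16 on the controller's own MMR addresses, no per-controller aliasing layer is needed, which is what allows the compatibility block removed at the end of the cdefBF561.h diff to go away.
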
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 79e048d452e..71e805ea74e 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1,18 +1,11 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
#ifndef _DEF_BF561_H
#define _DEF_BF561_H
-/*
-#if !defined(__ADSPBF561__)
-#warning defBF561.h should only be included for BF561 chip.
-#endif
-*/
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
/*********************************************************************************** */
/* System MMR Register Map */
@@ -311,10 +304,10 @@
#define PPI1_FRAME 0xFFC01310 /* PPI1 Frame Length register */
/*DMA traffic control registers */
-#define DMA1_TC_PER 0xFFC01B0C /* Traffic control periods */
-#define DMA1_TC_CNT 0xFFC01B10 /* Traffic control current counts */
-#define DMA2_TC_PER 0xFFC00B0C /* Traffic control periods */
-#define DMA2_TC_CNT 0xFFC00B10 /* Traffic control current counts */
+#define DMAC0_TC_PER 0xFFC00B0C /* Traffic control periods */
+#define DMAC0_TC_CNT 0xFFC00B10 /* Traffic control current counts */
+#define DMAC1_TC_PER 0xFFC01B0C /* Traffic control periods */
+#define DMAC1_TC_CNT 0xFFC01B10 /* Traffic control current counts */
/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
#define DMA1_0_CONFIG 0xFFC01C08 /* DMA1 Channel 0 Configuration register */
@@ -486,61 +479,61 @@
#define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC /* DMA1 Channel 11 Peripheral Map Register */
/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define MDMA1_D0_CONFIG 0xFFC01F08 /*MemDMA1 Stream 0 Destination Configuration */
-#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00 /*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D0_START_ADDR 0xFFC01F04 /*MemDMA1 Stream 0 Destination Start Address */
-#define MDMA1_D0_X_COUNT 0xFFC01F10 /*MemDMA1 Stream 0 Destination Inner-Loop Count */
-#define MDMA1_D0_Y_COUNT 0xFFC01F18 /*MemDMA1 Stream 0 Destination Outer-Loop Count */
-#define MDMA1_D0_X_MODIFY 0xFFC01F14 /*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA1_D0_Y_MODIFY 0xFFC01F1C /*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20 /*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA1_D0_CURR_ADDR 0xFFC01F24 /*MemDMA1 Stream 0 Destination Current Address */
-#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30 /*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
-#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38 /*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
-#define MDMA1_D0_IRQ_STATUS 0xFFC01F28 /*MemDMA1 Stream 0 Destination Interrupt/Status */
-#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C /*MemDMA1 Stream 0 Destination Peripheral Map */
-
-#define MDMA1_S0_CONFIG 0xFFC01F48 /*MemDMA1 Stream 0 Source Configuration */
-#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40 /*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA1_S0_START_ADDR 0xFFC01F44 /*MemDMA1 Stream 0 Source Start Address */
-#define MDMA1_S0_X_COUNT 0xFFC01F50 /*MemDMA1 Stream 0 Source Inner-Loop Count */
-#define MDMA1_S0_Y_COUNT 0xFFC01F58 /*MemDMA1 Stream 0 Source Outer-Loop Count */
-#define MDMA1_S0_X_MODIFY 0xFFC01F54 /*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
-#define MDMA1_S0_Y_MODIFY 0xFFC01F5C /*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
-#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60 /*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA1_S0_CURR_ADDR 0xFFC01F64 /*MemDMA1 Stream 0 Source Current Address */
-#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70 /*MemDMA1 Stream 0 Source Current Inner-Loop Count */
-#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78 /*MemDMA1 Stream 0 Source Current Outer-Loop Count */
-#define MDMA1_S0_IRQ_STATUS 0xFFC01F68 /*MemDMA1 Stream 0 Source Interrupt/Status */
-#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C /*MemDMA1 Stream 0 Source Peripheral Map */
-
-#define MDMA1_D1_CONFIG 0xFFC01F88 /*MemDMA1 Stream 1 Destination Configuration */
-#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80 /*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D1_START_ADDR 0xFFC01F84 /*MemDMA1 Stream 1 Destination Start Address */
-#define MDMA1_D1_X_COUNT 0xFFC01F90 /*MemDMA1 Stream 1 Destination Inner-Loop Count */
-#define MDMA1_D1_Y_COUNT 0xFFC01F98 /*MemDMA1 Stream 1 Destination Outer-Loop Count */
-#define MDMA1_D1_X_MODIFY 0xFFC01F94 /*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA1_D1_Y_MODIFY 0xFFC01F9C /*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0 /*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
-#define MDMA1_D1_CURR_ADDR 0xFFC01FA4 /*MemDMA1 Stream 1 Dest Current Address */
-#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0 /*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
-#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8 /*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
-#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8 /*MemDMA1 Stream 1 Dest Interrupt/Status */
-#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC /*MemDMA1 Stream 1 Dest Peripheral Map */
-
-#define MDMA1_S1_CONFIG 0xFFC01FC8 /*MemDMA1 Stream 1 Source Configuration */
-#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0 /*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA1_S1_START_ADDR 0xFFC01FC4 /*MemDMA1 Stream 1 Source Start Address */
-#define MDMA1_S1_X_COUNT 0xFFC01FD0 /*MemDMA1 Stream 1 Source Inner-Loop Count */
-#define MDMA1_S1_Y_COUNT 0xFFC01FD8 /*MemDMA1 Stream 1 Source Outer-Loop Count */
-#define MDMA1_S1_X_MODIFY 0xFFC01FD4 /*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
-#define MDMA1_S1_Y_MODIFY 0xFFC01FDC /*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0 /*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA1_S1_CURR_ADDR 0xFFC01FE4 /*MemDMA1 Stream 1 Source Current Address */
-#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0 /*MemDMA1 Stream 1 Source Current Inner-Loop Count */
-#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8 /*MemDMA1 Stream 1 Source Current Outer-Loop Count */
-#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8 /*MemDMA1 Stream 1 Source Interrupt/Status */
-#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC /*MemDMA1 Stream 1 Source Peripheral Map */
+#define MDMA_D2_CONFIG 0xFFC01F08 /*MemDMA1 Stream 0 Destination Configuration */
+#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00 /*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D2_START_ADDR 0xFFC01F04 /*MemDMA1 Stream 0 Destination Start Address */
+#define MDMA_D2_X_COUNT 0xFFC01F10 /*MemDMA1 Stream 0 Destination Inner-Loop Count */
+#define MDMA_D2_Y_COUNT 0xFFC01F18 /*MemDMA1 Stream 0 Destination Outer-Loop Count */
+#define MDMA_D2_X_MODIFY 0xFFC01F14 /*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D2_Y_MODIFY 0xFFC01F1C /*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20 /*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D2_CURR_ADDR 0xFFC01F24 /*MemDMA1 Stream 0 Destination Current Address */
+#define MDMA_D2_CURR_X_COUNT 0xFFC01F30 /*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
+#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38 /*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
+#define MDMA_D2_IRQ_STATUS 0xFFC01F28 /*MemDMA1 Stream 0 Destination Interrupt/Status */
+#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C /*MemDMA1 Stream 0 Destination Peripheral Map */
+
+#define MDMA_S2_CONFIG 0xFFC01F48 /*MemDMA1 Stream 0 Source Configuration */
+#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40 /*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S2_START_ADDR 0xFFC01F44 /*MemDMA1 Stream 0 Source Start Address */
+#define MDMA_S2_X_COUNT 0xFFC01F50 /*MemDMA1 Stream 0 Source Inner-Loop Count */
+#define MDMA_S2_Y_COUNT 0xFFC01F58 /*MemDMA1 Stream 0 Source Outer-Loop Count */
+#define MDMA_S2_X_MODIFY 0xFFC01F54 /*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
+#define MDMA_S2_Y_MODIFY 0xFFC01F5C /*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
+#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60 /*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S2_CURR_ADDR 0xFFC01F64 /*MemDMA1 Stream 0 Source Current Address */
+#define MDMA_S2_CURR_X_COUNT 0xFFC01F70 /*MemDMA1 Stream 0 Source Current Inner-Loop Count */
+#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78 /*MemDMA1 Stream 0 Source Current Outer-Loop Count */
+#define MDMA_S2_IRQ_STATUS 0xFFC01F68 /*MemDMA1 Stream 0 Source Interrupt/Status */
+#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C /*MemDMA1 Stream 0 Source Peripheral Map */
+
+#define MDMA_D3_CONFIG 0xFFC01F88 /*MemDMA1 Stream 1 Destination Configuration */
+#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80 /*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D3_START_ADDR 0xFFC01F84 /*MemDMA1 Stream 1 Destination Start Address */
+#define MDMA_D3_X_COUNT 0xFFC01F90 /*MemDMA1 Stream 1 Destination Inner-Loop Count */
+#define MDMA_D3_Y_COUNT 0xFFC01F98 /*MemDMA1 Stream 1 Destination Outer-Loop Count */
+#define MDMA_D3_X_MODIFY 0xFFC01F94 /*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D3_Y_MODIFY 0xFFC01F9C /*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0 /*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
+#define MDMA_D3_CURR_ADDR 0xFFC01FA4 /*MemDMA1 Stream 1 Dest Current Address */
+#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0 /*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
+#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8 /*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
+#define MDMA_D3_IRQ_STATUS 0xFFC01FA8 /*MemDMA1 Stream 1 Dest Interrupt/Status */
+#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC /*MemDMA1 Stream 1 Dest Peripheral Map */
+
+#define MDMA_S3_CONFIG 0xFFC01FC8 /*MemDMA1 Stream 1 Source Configuration */
+#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0 /*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S3_START_ADDR 0xFFC01FC4 /*MemDMA1 Stream 1 Source Start Address */
+#define MDMA_S3_X_COUNT 0xFFC01FD0 /*MemDMA1 Stream 1 Source Inner-Loop Count */
+#define MDMA_S3_Y_COUNT 0xFFC01FD8 /*MemDMA1 Stream 1 Source Outer-Loop Count */
+#define MDMA_S3_X_MODIFY 0xFFC01FD4 /*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
+#define MDMA_S3_Y_MODIFY 0xFFC01FDC /*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0 /*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S3_CURR_ADDR 0xFFC01FE4 /*MemDMA1 Stream 1 Source Current Address */
+#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0 /*MemDMA1 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8 /*MemDMA1 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S3_IRQ_STATUS 0xFFC01FE8 /*MemDMA1 Stream 1 Source Interrupt/Status */
+#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC /*MemDMA1 Stream 1 Source Peripheral Map */
/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
#define DMA2_0_CONFIG 0xFFC00C08 /* DMA2 Channel 0 Configuration register */
@@ -712,117 +705,61 @@
#define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC /* DMA2 Channel 11 Peripheral Map Register */
/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define MDMA2_D0_CONFIG 0xFFC00F08 /*MemDMA2 Stream 0 Destination Configuration register */
-#define MDMA2_D0_NEXT_DESC_PTR 0xFFC00F00 /*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D0_START_ADDR 0xFFC00F04 /*MemDMA2 Stream 0 Destination Start Address */
-#define MDMA2_D0_X_COUNT 0xFFC00F10 /*MemDMA2 Stream 0 Dest Inner-Loop Count register */
-#define MDMA2_D0_Y_COUNT 0xFFC00F18 /*MemDMA2 Stream 0 Dest Outer-Loop Count register */
-#define MDMA2_D0_X_MODIFY 0xFFC00F14 /*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA2_D0_Y_MODIFY 0xFFC00F1C /*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA2_D0_CURR_DESC_PTR 0xFFC00F20 /*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA2_D0_CURR_ADDR 0xFFC00F24 /*MemDMA2 Stream 0 Destination Current Address */
-#define MDMA2_D0_CURR_X_COUNT 0xFFC00F30 /*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
-#define MDMA2_D0_CURR_Y_COUNT 0xFFC00F38 /*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
-#define MDMA2_D0_IRQ_STATUS 0xFFC00F28 /*MemDMA2 Stream 0 Dest Interrupt/Status Register */
-#define MDMA2_D0_PERIPHERAL_MAP 0xFFC00F2C /*MemDMA2 Stream 0 Destination Peripheral Map register */
-
-#define MDMA2_S0_CONFIG 0xFFC00F48 /*MemDMA2 Stream 0 Source Configuration register */
-#define MDMA2_S0_NEXT_DESC_PTR 0xFFC00F40 /*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA2_S0_START_ADDR 0xFFC00F44 /*MemDMA2 Stream 0 Source Start Address */
-#define MDMA2_S0_X_COUNT 0xFFC00F50 /*MemDMA2 Stream 0 Source Inner-Loop Count register */
-#define MDMA2_S0_Y_COUNT 0xFFC00F58 /*MemDMA2 Stream 0 Source Outer-Loop Count register */
-#define MDMA2_S0_X_MODIFY 0xFFC00F54 /*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
-#define MDMA2_S0_Y_MODIFY 0xFFC00F5C /*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
-#define MDMA2_S0_CURR_DESC_PTR 0xFFC00F60 /*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA2_S0_CURR_ADDR 0xFFC00F64 /*MemDMA2 Stream 0 Source Current Address */
-#define MDMA2_S0_CURR_X_COUNT 0xFFC00F70 /*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
-#define MDMA2_S0_CURR_Y_COUNT 0xFFC00F78 /*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
-#define MDMA2_S0_IRQ_STATUS 0xFFC00F68 /*MemDMA2 Stream 0 Source Interrupt/Status Register */
-#define MDMA2_S0_PERIPHERAL_MAP 0xFFC00F6C /*MemDMA2 Stream 0 Source Peripheral Map register */
-
-#define MDMA2_D1_CONFIG 0xFFC00F88 /*MemDMA2 Stream 1 Destination Configuration register */
-#define MDMA2_D1_NEXT_DESC_PTR 0xFFC00F80 /*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D1_START_ADDR 0xFFC00F84 /*MemDMA2 Stream 1 Destination Start Address */
-#define MDMA2_D1_X_COUNT 0xFFC00F90 /*MemDMA2 Stream 1 Dest Inner-Loop Count register */
-#define MDMA2_D1_Y_COUNT 0xFFC00F98 /*MemDMA2 Stream 1 Dest Outer-Loop Count register */
-#define MDMA2_D1_X_MODIFY 0xFFC00F94 /*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA2_D1_Y_MODIFY 0xFFC00F9C /*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA2_D1_CURR_DESC_PTR 0xFFC00FA0 /*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
-#define MDMA2_D1_CURR_ADDR 0xFFC00FA4 /*MemDMA2 Stream 1 Destination Current Address reg */
-#define MDMA2_D1_CURR_X_COUNT 0xFFC00FB0 /*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
-#define MDMA2_D1_CURR_Y_COUNT 0xFFC00FB8 /*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
-#define MDMA2_D1_IRQ_STATUS 0xFFC00FA8 /*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
-#define MDMA2_D1_PERIPHERAL_MAP 0xFFC00FAC /*MemDMA2 Stream 1 Destination Peripheral Map register */
-
-#define MDMA2_S1_CONFIG 0xFFC00FC8 /*MemDMA2 Stream 1 Source Configuration register */
-#define MDMA2_S1_NEXT_DESC_PTR 0xFFC00FC0 /*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA2_S1_START_ADDR 0xFFC00FC4 /*MemDMA2 Stream 1 Source Start Address */
-#define MDMA2_S1_X_COUNT 0xFFC00FD0 /*MemDMA2 Stream 1 Source Inner-Loop Count register */
-#define MDMA2_S1_Y_COUNT 0xFFC00FD8 /*MemDMA2 Stream 1 Source Outer-Loop Count register */
-#define MDMA2_S1_X_MODIFY 0xFFC00FD4 /*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
-#define MDMA2_S1_Y_MODIFY 0xFFC00FDC /*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA2_S1_CURR_DESC_PTR 0xFFC00FE0 /*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA2_S1_CURR_ADDR 0xFFC00FE4 /*MemDMA2 Stream 1 Source Current Address */
-#define MDMA2_S1_CURR_X_COUNT 0xFFC00FF0 /*MemDMA2 Stream 1 Source Current Inner-Loop Count */
-#define MDMA2_S1_CURR_Y_COUNT 0xFFC00FF8 /*MemDMA2 Stream 1 Source Current Outer-Loop Count */
-#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */
-#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA1_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA1_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA1_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA1_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA1_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA1_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA1_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA1_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA1_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA1_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA1_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA1_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA1_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA1_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA1_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA1_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA1_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA1_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA1_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA1_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA1_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA1_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA1_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA1_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA1_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA1_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA1_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA1_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA1_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA1_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA1_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA1_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA1_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA1_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA1_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA1_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA1_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA1_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA1_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA1_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA1_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA1_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA1_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA1_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA1_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA1_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA1_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA1_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA1_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA1_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA1_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA1_S1_CURR_Y_COUNT
+#define MDMA_D0_CONFIG 0xFFC00F08 /*MemDMA2 Stream 0 Destination Configuration register */
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D0_START_ADDR 0xFFC00F04 /*MemDMA2 Stream 0 Destination Start Address */
+#define MDMA_D0_X_COUNT 0xFFC00F10 /*MemDMA2 Stream 0 Dest Inner-Loop Count register */
+#define MDMA_D0_Y_COUNT 0xFFC00F18 /*MemDMA2 Stream 0 Dest Outer-Loop Count register */
+#define MDMA_D0_X_MODIFY 0xFFC00F14 /*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D0_Y_MODIFY 0xFFC00F1C /*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D0_CURR_ADDR 0xFFC00F24 /*MemDMA2 Stream 0 Destination Current Address */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
+#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /*MemDMA2 Stream 0 Dest Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /*MemDMA2 Stream 0 Destination Peripheral Map register */
+
+#define MDMA_S0_CONFIG 0xFFC00F48 /*MemDMA2 Stream 0 Source Configuration register */
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S0_START_ADDR 0xFFC00F44 /*MemDMA2 Stream 0 Source Start Address */
+#define MDMA_S0_X_COUNT 0xFFC00F50 /*MemDMA2 Stream 0 Source Inner-Loop Count register */
+#define MDMA_S0_Y_COUNT 0xFFC00F58 /*MemDMA2 Stream 0 Source Outer-Loop Count register */
+#define MDMA_S0_X_MODIFY 0xFFC00F54 /*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
+#define MDMA_S0_Y_MODIFY 0xFFC00F5C /*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S0_CURR_ADDR 0xFFC00F64 /*MemDMA2 Stream 0 Source Current Address */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
+#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /*MemDMA2 Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /*MemDMA2 Stream 0 Source Peripheral Map register */
+
+#define MDMA_D1_CONFIG 0xFFC00F88 /*MemDMA2 Stream 1 Destination Configuration register */
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D1_START_ADDR 0xFFC00F84 /*MemDMA2 Stream 1 Destination Start Address */
+#define MDMA_D1_X_COUNT 0xFFC00F90 /*MemDMA2 Stream 1 Dest Inner-Loop Count register */
+#define MDMA_D1_Y_COUNT 0xFFC00F98 /*MemDMA2 Stream 1 Dest Outer-Loop Count register */
+#define MDMA_D1_X_MODIFY 0xFFC00F94 /*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D1_Y_MODIFY 0xFFC00F9C /*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
+#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /*MemDMA2 Stream 1 Destination Current Address reg */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
+#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /*MemDMA2 Stream 1 Destination Peripheral Map register */
+
+#define MDMA_S1_CONFIG 0xFFC00FC8 /*MemDMA2 Stream 1 Source Configuration register */
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S1_START_ADDR 0xFFC00FC4 /*MemDMA2 Stream 1 Source Start Address */
+#define MDMA_S1_X_COUNT 0xFFC00FD0 /*MemDMA2 Stream 1 Source Inner-Loop Count register */
+#define MDMA_S1_Y_COUNT 0xFFC00FD8 /*MemDMA2 Stream 1 Source Outer-Loop Count register */
+#define MDMA_S1_X_MODIFY 0xFFC00FD4 /*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
+#define MDMA_S1_Y_MODIFY 0xFFC00FDC /*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /*MemDMA2 Stream 1 Source Current Address */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /*MemDMA2 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /*MemDMA2 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */
/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */
@@ -927,83 +864,6 @@
#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB 0x80
-#define SB 0x40
-#define STP 0x20
-#define EPS 0x10
-#define PEN 0x08
-#define STB 0x04
-#define WLS(x) ((x-5) & 0x03)
-
-#define DLAB_P 0x07
-#define SB_P 0x06
-#define STP_P 0x05
-#define EPS_P 0x04
-#define PEN_P 0x03
-#define STB_P 0x02
-#define WLS_P1 0x01
-#define WLS_P0 0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA 0x10
-#define LOOP_ENA_P 0x04
-
-/* UART_LSR Register */
-#define TEMT 0x40
-#define THRE 0x20
-#define BI 0x10
-#define FE 0x08
-#define PE 0x04
-#define OE 0x02
-#define DR 0x01
-
-#define TEMP_P 0x06
-#define THRE_P 0x05
-#define BI_P 0x04
-#define FE_P 0x03
-#define PE_P 0x02
-#define OE_P 0x01
-#define DR_P 0x00
-
-/* UART_IER Register */
-#define ELSI 0x04
-#define ETBEI 0x02
-#define ERBFI 0x01
-
-#define ELSI_P 0x02
-#define ETBEI_P 0x01
-#define ERBFI_P 0x00
-
-/* UART_IIR Register */
-#define STATUS(x) ((x << 1) & 0x06)
-#define NINT 0x01
-#define STATUS_P1 0x02
-#define STATUS_P0 0x01
-#define NINT_P 0x00
-#define IIR_TX_READY 0x02 /* UART_THR empty */
-#define IIR_RX_READY 0x04 /* Receive data ready */
-#define IIR_LINE_CHANGE 0x06 /* Receive line status */
-#define IIR_STATUS 0x06
-
-/* UART_GCTL Register */
-#define FFE 0x20
-#define FPE 0x10
-#define RPOLC 0x08
-#define TPOLC 0x04
-#define IREN 0x02
-#define UCEN 0x01
-
-#define FFE_P 0x05
-#define FPE_P 0x04
-#define RPOLC_P 0x03
-#define TPOLC_P 0x02
-#define IREN_P 0x01
-#define UCEN_P 0x00
-
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
@@ -1230,44 +1090,6 @@
#define ERR_TYP_P0 0x0E
#define ERR_TYP_P1 0x0F
-/*/ ****************** PROGRAMMABLE FLAG MASKS ********************* */
-
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */
-#define PF0_P 0
-#define PF1_P 1
-#define PF2_P 2
-#define PF3_P 3
-#define PF4_P 4
-#define PF5_P 5
-#define PF6_P 6
-#define PF7_P 7
-#define PF8_P 8
-#define PF9_P 9
-#define PF10_P 10
-#define PF11_P 11
-#define PF12_P 12
-#define PF13_P 13
-#define PF14_P 14
-#define PF15_P 15
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
/* AMGCTL Masks */
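The hunk above replaces the MDMA1-aliased names with the MemDMA2 stream's raw MMIO addresses, so the MDMA_* symbols can be handed straight to the MMIO accessors. A minimal sketch of a polled one-dimensional memory-to-memory transfer built on this register map; the DMAEN/WNR/DMA_DONE bit values and the flow-stop semantics are assumptions here, not taken from this hunk:

/* Illustrative bit values (assumed, not from this header) */
#define DMAEN		0x0001	/* channel enable */
#define WNR		0x0002	/* direction: write to memory */
#define DMA_DONE	0x0001	/* IRQ_STATUS: transfer finished */

static void mdma_memcpy_poll(void *dst, const void *src, unsigned short len)
{
	/* Program source stream 0: byte-wide, stride 1, flow-stop */
	bfin_write32(MDMA_S0_START_ADDR, (unsigned long)src);
	bfin_write16(MDMA_S0_X_COUNT, len);
	bfin_write16(MDMA_S0_X_MODIFY, 1);
	/* Program destination stream 0 symmetrically */
	bfin_write32(MDMA_D0_START_ADDR, (unsigned long)dst);
	bfin_write16(MDMA_D0_X_COUNT, len);
	bfin_write16(MDMA_D0_X_MODIFY, 1);
	/* Enable source first; enabling the destination kicks the copy off */
	bfin_write16(MDMA_S0_CONFIG, DMAEN);
	bfin_write16(MDMA_D0_CONFIG, DMAEN | WNR);
	/* Busy-wait on the destination's done bit, then write-1-to-clear it */
	while (!(bfin_read16(MDMA_D0_IRQ_STATUS) & DMA_DONE))
		continue;
	bfin_write16(MDMA_D0_IRQ_STATUS, DMA_DONE);
}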
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 4f8aa5d0880..57d5eab59fa 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -62,4 +62,6 @@
#define PORT_FIO1 GPIO_16
#define PORT_FIO2 GPIO_32
+#include <mach-common/ports-f.h>
+
#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 5b96ea549a0..4cc91995f78 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -106,7 +106,7 @@
#define COREA_L1_SCRATCH_START 0xFFB00000
#define COREB_L1_SCRATCH_START 0xFF700000
-#ifdef __ASSEMBLY__
+#ifdef CONFIG_SMP
/*
* The following macros both return the address of the PDA for the
@@ -121,8 +121,7 @@
* is allowed to use the specified Dreg for determining the PDA
* address to be returned into Preg.
*/
-#ifdef CONFIG_SMP
-#define GET_PDA_SAFE(preg) \
+# define GET_PDA_SAFE(preg) \
preg.l = lo(DSPID); \
preg.h = hi(DSPID); \
preg = [preg]; \
@@ -158,7 +157,7 @@
preg = [preg]; \
4:
-#define GET_PDA(preg, dreg) \
+# define GET_PDA(preg, dreg) \
preg.l = lo(DSPID); \
preg.h = hi(DSPID); \
dreg = [preg]; \
@@ -169,13 +168,17 @@
preg = [preg]; \
1: \
-#define GET_CPUID(preg, dreg) \
+# define GET_CPUID(preg, dreg) \
preg.l = lo(DSPID); \
preg.h = hi(DSPID); \
dreg = [preg]; \
dreg = ROT dreg BY -1; \
dreg = CC;
+# ifndef __ASSEMBLY__
+
+# include <asm/processor.h>
+
static inline unsigned long get_l1_scratch_start_cpu(int cpu)
{
return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
@@ -210,8 +213,7 @@ static inline unsigned long get_l1_data_b_start(void)
return get_l1_data_b_start_cpu(blackfin_core_id());
}
+# endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
-
#endif
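The rework nests the __ASSEMBLY__ test inside CONFIG_SMP rather than the other way round, so the GET_PDA assembly macros and the C inline helpers share one SMP gate and the header stays usable from both .S and .c files. A hedged sketch of a C-side consumer under those guards (my_l1_scratch is a hypothetical helper, not from this file):

#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__
/* Hypothetical: resolve the calling core's private L1 scratchpad base */
static inline void *my_l1_scratch(void)
{
	return (void *)get_l1_scratch_start_cpu(blackfin_core_id());
}
#endif
#endif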
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index f2b1fbdb8e7..7977db2f1c1 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -7,57 +7,48 @@
#ifndef _MACH_PLL_H
#define _MACH_PLL_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
#include <asm/blackfin.h>
#include <asm/irqflags.h>
+#include <mach/irq.h>
+
+#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_PLL_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SICA_IWR0);
- iwr1 = bfin_read32(SICA_IWR1);
- /* Only allow PPL Wakeup) */
- bfin_write32(SICA_IWR0, IWR_ENABLE(0));
- bfin_write32(SICA_IWR1, 0);
-
- bfin_write16(PLL_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SICA_IWR0, iwr0);
- bfin_write32(SICA_IWR1, iwr1);
- hard_local_irq_restore(flags);
+ unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
+
+ bfin_write32(SIC_IWR0 + SICA_SICB_OFF, iwr0);
+ bfin_write32(SIC_IWR1 + SICA_SICB_OFF, iwr1);
}
+#define bfin_iwr_restore bfin_iwr_restore
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+ unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
{
- unsigned long flags, iwr0, iwr1;
-
- if (val == bfin_read_VR_CTL())
- return;
-
- flags = hard_local_irq_save();
- /* Enable the PLL Wakeup bit in SIC IWR */
- iwr0 = bfin_read32(SICA_IWR0);
- iwr1 = bfin_read32(SICA_IWR1);
- /* Only allow PPL Wakeup) */
- bfin_write32(SICA_IWR0, IWR_ENABLE(0));
- bfin_write32(SICA_IWR1, 0);
-
- bfin_write16(VR_CTL, val);
- SSYNC();
- asm("IDLE;");
-
- bfin_write32(SICA_IWR0, iwr0);
- bfin_write32(SICA_IWR1, iwr1);
- hard_local_irq_restore(flags);
+ unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
+
+ *iwr0 = bfin_read32(SIC_IWR0 + SICA_SICB_OFF);
+ *iwr1 = bfin_read32(SIC_IWR1 + SICA_SICB_OFF);
+ bfin_iwr_restore(niwr0, niwr1, niwr2);
}
+#define bfin_iwr_save bfin_iwr_save
+
+static inline void
+bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+ bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
+}
+
+#endif
+
+#endif
+
+#include <mach-common/pll.h>
-#endif /* _MACH_PLL_H */
+#endif
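The per-mach bfin_write_PLL_CTL/bfin_write_VR_CTL bodies are gone; this header now only supplies SMP-aware IWR save/restore hooks keyed off DSPID (the 0x1000 offset selects the SICB bank when running on CoreB) and defers the relock sequence to the shared mach-common/pll.h. That generic sequence is not shown in this hunk, so treat the following as a sketch of what it presumably builds on the new hooks, not as the actual implementation:

static inline void pll_ctl_write_sketch(unsigned int val)
{
	unsigned long flags, iwr0, iwr1, iwr2;

	if (val == bfin_read_PLL_CTL())
		return;

	flags = hard_local_irq_save();
	/* Leave only the supplemental-0 wakeup armed while relocking
	 * (whether the generic code picks SUPPLE_0 here is an assumption) */
	bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);

	bfin_write16(PLL_CTL, val);
	SSYNC();
	asm("IDLE;");

	bfin_iwr_restore(iwr0, iwr1, iwr2);
	hard_local_irq_restore(flags);
}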
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
index 2c8c514dd38..346c60589be 100644
--- a/arch/blackfin/mach-bf561/include/mach/smp.h
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -7,6 +7,8 @@
#ifndef _MACH_BF561_SMP
#define _MACH_BF561_SMP
+/* This header has to stand alone to avoid circular deps */
+
struct task_struct;
void platform_init_cpus(void);
@@ -17,13 +19,13 @@ int platform_boot_secondary(unsigned int cpu, struct task_struct *idle);
void platform_secondary_init(unsigned int cpu);
-void platform_request_ipi(int (*handler)(int, void *));
+void platform_request_ipi(int irq, /*irq_handler_t*/ void *handler);
-void platform_send_ipi(cpumask_t callmap);
+void platform_send_ipi(cpumask_t callmap, int irq);
-void platform_send_ipi_cpu(unsigned int cpu);
+void platform_send_ipi_cpu(unsigned int cpu, int irq);
-void platform_clear_ipi(unsigned int cpu);
+void platform_clear_ipi(unsigned int cpu, int irq);
void bfin_local_timer_setup(void);
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index f540ed1257d..1074a7ef81c 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -86,12 +86,12 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
spin_lock(&boot_lock);
- if ((bfin_read_SIC_SYSCR() & COREB_SRAM_INIT) == 0) {
+ if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
/* CoreB already running, sending ipi to wakeup it */
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
} else {
/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
- bfin_write_SIC_SYSCR(bfin_read_SIC_SYSCR() & ~COREB_SRAM_INIT);
+ bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
SSYNC();
}
@@ -111,41 +111,46 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
panic("CPU%u: processor failed to boot\n", cpu);
}
-void __init platform_request_ipi(irq_handler_t handler)
+static const char supple0[] = "IRQ_SUPPLE_0";
+static const char supple1[] = "IRQ_SUPPLE_1";
+void __init platform_request_ipi(int irq, void *handler)
{
int ret;
+ const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
- ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
- "Supplemental Interrupt0", handler);
+ ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
if (ret)
- panic("Cannot request supplemental interrupt 0 for IPI service");
+ panic("Cannot request %s for IPI service", name);
}
-void platform_send_ipi(cpumask_t callmap)
+void platform_send_ipi(cpumask_t callmap, int irq)
{
unsigned int cpu;
+ int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
for_each_cpu_mask(cpu, callmap) {
BUG_ON(cpu >= 2);
SSYNC();
- bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+ bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
SSYNC();
}
}
-void platform_send_ipi_cpu(unsigned int cpu)
+void platform_send_ipi_cpu(unsigned int cpu, int irq)
{
+ int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
BUG_ON(cpu >= 2);
SSYNC();
- bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+ bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
SSYNC();
}
-void platform_clear_ipi(unsigned int cpu)
+void platform_clear_ipi(unsigned int cpu, int irq)
{
+ int offset = (irq == IRQ_SUPPLE_0) ? 10 : 12;
BUG_ON(cpu >= 2);
SSYNC();
- bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu)));
+ bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
SSYNC();
}
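The offsets parameterized above imply a SICB_SYSCR layout of two set pairs and two clear pairs, one bit per core; this is inferred from the code, not quoted from a datasheet:

/* Inferred SICB_SYSCR IPI bits (cpu is 0 for CoreA, 1 for CoreB):
 *   bit  6 + cpu  raise IRQ_SUPPLE_0    bit  8 + cpu  raise IRQ_SUPPLE_1
 *   bit 10 + cpu  clear IRQ_SUPPLE_0    bit 12 + cpu  clear IRQ_SUPPLE_1
 */
static inline int ipi_raise_bit(int irq, unsigned int cpu)
{
	return ((irq == IRQ_SUPPLE_0) ? 6 : 8) + cpu;
}

static inline int ipi_clear_bit(int irq, unsigned int cpu)
{
	return ((irq == IRQ_SUPPLE_0) ? 10 : 12) + cpu;
}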
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 2ca915ee181..bc08c98d008 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -615,7 +615,7 @@ ENTRY(_system_call)
#ifdef CONFIG_IPIPE
r0 = sp;
SP += -12;
- call ___ipipe_syscall_root;
+ pseudo_long_call ___ipipe_syscall_root, p0;
SP += 12;
cc = r0 == 1;
if cc jump .Lsyscall_really_exit;
@@ -692,7 +692,7 @@ ENTRY(_system_call)
[--sp] = reti;
SP += 4; /* don't merge with next insn to keep the pattern obvious */
SP += -12;
- call ___ipipe_sync_root;
+ pseudo_long_call ___ipipe_sync_root, p4;
SP += 12;
jump .Lresume_userspace_1;
.Lsyscall_no_irqsync:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index da7e3c63746..a604f19d8dc 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -866,7 +866,6 @@ static void bfin_gpio_unmask_irq(unsigned int irq)
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
- pint[bank]->request = pintbit;
pint[bank]->mask_set = pintbit;
}
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 80884b136a0..42fa87e8375 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -23,9 +23,6 @@
void bfin_pm_suspend_standby_enter(void)
{
- unsigned long flags;
-
- flags = hard_local_irq_save();
bfin_pm_standby_setup();
#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -55,8 +52,6 @@ void bfin_pm_suspend_standby_enter(void)
#else
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
-
- hard_local_irq_restore(flags);
}
int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -127,7 +122,6 @@ static void flushinv_all_dcache(void)
int bfin_pm_suspend_mem_enter(void)
{
- unsigned long flags;
int wakeup, ret;
unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
@@ -149,12 +143,9 @@ int bfin_pm_suspend_mem_enter(void)
wakeup |= GPWE;
#endif
- flags = hard_local_irq_save();
-
ret = blackfin_dma_suspend();
if (ret) {
- hard_local_irq_restore(flags);
kfree(memptr);
return ret;
}
@@ -178,7 +169,6 @@ int bfin_pm_suspend_mem_enter(void)
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
- hard_local_irq_restore(flags);
kfree(memptr);
return 0;
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a700d..9f251406a76 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
+#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
@@ -43,12 +44,6 @@ void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
*init_saved_dcplb_fault_addr_coreb;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
#define BFIN_IPI_RESCHEDULE 0
#define BFIN_IPI_CALL_FUNC 1
#define BFIN_IPI_CPU_STOP 2
@@ -65,8 +60,7 @@ struct smp_call_struct {
void (*func)(void *info);
void *info;
int wait;
- cpumask_t pending;
- cpumask_t waitmask;
+ cpumask_t *waitmask;
};
static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +68,19 @@ static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
- struct list_head list;
unsigned long type;
struct smp_call_struct call_struct;
};
+/* A magic number - stress testing shows this is safe for common cases */
+#define BFIN_IPI_MSGQ_LEN 5
+
+/* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue {
- struct list_head head;
spinlock_t lock;
unsigned long count;
+ unsigned long head; /* head of the queue */
+ struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};
static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
@@ -121,7 +119,6 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
func = msg->call_struct.func;
info = msg->call_struct.info;
wait = msg->call_struct.wait;
- cpu_clear(cpu, msg->call_struct.pending);
func(info);
if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +129,57 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
*/
resync_core_dcache();
#endif
- cpu_clear(cpu, msg->call_struct.waitmask);
- } else
- kfree(msg);
+ cpu_clear(cpu, *msg->call_struct.waitmask);
+ }
+}
+
+/* Use IRQ_SUPPLE_0 to request a reschedule.
+ * When returning from interrupt to user space,
+ * there is a chance to reschedule. */
+static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
+{
+ unsigned int cpu = smp_processor_id();
+
+ platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+ return IRQ_HANDLED;
}
-static irqreturn_t ipi_handler(int irq, void *dev_instance)
+static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
+ unsigned long flags;
- platform_clear_ipi(cpu);
+ platform_clear_ipi(cpu, IRQ_SUPPLE_1);
msg_queue = &__get_cpu_var(ipi_msg_queue);
- msg_queue->count++;
- spin_lock(&msg_queue->lock);
- while (!list_empty(&msg_queue->head)) {
- msg = list_entry(msg_queue->head.next, typeof(*msg), list);
- list_del(&msg->list);
+ spin_lock_irqsave(&msg_queue->lock, flags);
+
+ while (msg_queue->count) {
+ msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
- case BFIN_IPI_RESCHEDULE:
- /* That's the easiest one; leave it to
- * return_from_int. */
- kfree(msg);
- break;
case BFIN_IPI_CALL_FUNC:
- spin_unlock(&msg_queue->lock);
+ spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_call_function(cpu, msg);
- spin_lock(&msg_queue->lock);
+ spin_lock_irqsave(&msg_queue->lock, flags);
break;
case BFIN_IPI_CPU_STOP:
- spin_unlock(&msg_queue->lock);
+ spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_cpu_stop(cpu);
- spin_lock(&msg_queue->lock);
- kfree(msg);
+ spin_lock_irqsave(&msg_queue->lock, flags);
break;
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
cpu, msg->type);
- kfree(msg);
break;
}
+ msg_queue->head++;
+ msg_queue->head %= BFIN_IPI_MSGQ_LEN;
+ msg_queue->count--;
}
- spin_unlock(&msg_queue->lock);
+ spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}
@@ -186,48 +189,47 @@ static void ipi_queue_init(void)
struct ipi_message_queue *msg_queue;
for_each_possible_cpu(cpu) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
- INIT_LIST_HEAD(&msg_queue->head);
spin_lock_init(&msg_queue->lock);
msg_queue->count = 0;
+ msg_queue->head = 0;
}
}
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+static inline void smp_send_message(cpumask_t callmap, unsigned long type,
+ void (*func) (void *info), void *info, int wait)
{
unsigned int cpu;
- cpumask_t callmap;
- unsigned long flags;
struct ipi_message_queue *msg_queue;
struct ipi_message *msg;
-
- callmap = cpu_online_map;
- cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- return 0;
-
- msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg)
- return -ENOMEM;
- INIT_LIST_HEAD(&msg->list);
- msg->call_struct.func = func;
- msg->call_struct.info = info;
- msg->call_struct.wait = wait;
- msg->call_struct.pending = callmap;
- msg->call_struct.waitmask = callmap;
- msg->type = BFIN_IPI_CALL_FUNC;
+ unsigned long flags, next_msg;
+ cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add_tail(&msg->list, &msg_queue->head);
+ if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
+ next_msg = (msg_queue->head + msg_queue->count)
+ % BFIN_IPI_MSGQ_LEN;
+ msg = &msg_queue->ipi_message[next_msg];
+ msg->type = type;
+ if (type == BFIN_IPI_CALL_FUNC) {
+ msg->call_struct.func = func;
+ msg->call_struct.info = info;
+ msg->call_struct.wait = wait;
+ msg->call_struct.waitmask = &waitmask;
+ }
+ msg_queue->count++;
+ } else
+ panic("IPI message queue overflow\n");
spin_unlock_irqrestore(&msg_queue->lock, flags);
- platform_send_ipi_cpu(cpu);
+ platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}
+
if (wait) {
- while (!cpus_empty(msg->call_struct.waitmask))
+ while (!cpus_empty(waitmask))
blackfin_dcache_invalidate_range(
- (unsigned long)(&msg->call_struct.waitmask),
- (unsigned long)(&msg->call_struct.waitmask));
+ (unsigned long)(&waitmask),
+ (unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
/*
* Invalidate D cache in case shared data was changed by
@@ -235,8 +237,20 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
*/
resync_core_dcache();
#endif
- kfree(msg);
}
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+ cpumask_t callmap;
+
+ callmap = cpu_online_map;
+ cpu_clear(smp_processor_id(), callmap);
+ if (cpus_empty(callmap))
+ return 0;
+
+ smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
@@ -246,100 +260,39 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
{
unsigned int cpu = cpuid;
cpumask_t callmap;
- unsigned long flags;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
if (cpu_is_offline(cpu))
return 0;
cpus_clear(callmap);
cpu_set(cpu, callmap);
- msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg)
- return -ENOMEM;
- INIT_LIST_HEAD(&msg->list);
- msg->call_struct.func = func;
- msg->call_struct.info = info;
- msg->call_struct.wait = wait;
- msg->call_struct.pending = callmap;
- msg->call_struct.waitmask = callmap;
- msg->type = BFIN_IPI_CALL_FUNC;
-
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- list_add_tail(&msg->list, &msg_queue->head);
- spin_unlock_irqrestore(&msg_queue->lock, flags);
- platform_send_ipi_cpu(cpu);
+ smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
- if (wait) {
- while (!cpus_empty(msg->call_struct.waitmask))
- blackfin_dcache_invalidate_range(
- (unsigned long)(&msg->call_struct.waitmask),
- (unsigned long)(&msg->call_struct.waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * Invalidate D cache in case shared data was changed by
- * other processors to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- kfree(msg);
- }
return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
- unsigned long flags;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
-
+ /* simply trigger an IPI */
if (cpu_is_offline(cpu))
return;
-
- msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg)
- return;
- INIT_LIST_HEAD(&msg->list);
- msg->type = BFIN_IPI_RESCHEDULE;
-
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- list_add_tail(&msg->list, &msg_queue->head);
- spin_unlock_irqrestore(&msg_queue->lock, flags);
- platform_send_ipi_cpu(cpu);
+ platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
return;
}
void smp_send_stop(void)
{
- unsigned int cpu;
cpumask_t callmap;
- unsigned long flags;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
callmap = cpu_online_map;
cpu_clear(smp_processor_id(), callmap);
if (cpus_empty(callmap))
return;
- msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg)
- return;
- INIT_LIST_HEAD(&msg->list);
- msg->type = BFIN_IPI_CPU_STOP;
+ smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
- for_each_cpu_mask(cpu, callmap) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- list_add_tail(&msg->list, &msg_queue->head);
- spin_unlock_irqrestore(&msg_queue->lock, flags);
- platform_send_ipi_cpu(cpu);
- }
return;
}
@@ -446,7 +399,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
ipi_queue_init();
- platform_request_ipi(&ipi_handler);
+ platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
+ platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
void __init smp_cpus_done(unsigned int max_cpus)
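The rewrite replaces kmalloc'd, list-linked IPI messages with a fixed five-slot ring per CPU: producers append at (head + count) % len under the queue lock, the consumer pops at head, and overflow panics rather than allocating. The same discipline in a self-contained sketch (the names are illustrative, not the kernel's):

#define QLEN 5

struct ring {
	unsigned int head, count;
	int slot[QLEN];
};

/* Producer: the kernel does this under msg_queue->lock and panics on full */
static int ring_push(struct ring *q, int v)
{
	if (q->count == QLEN)
		return -1;
	q->slot[(q->head + q->count) % QLEN] = v;
	q->count++;
	return 0;
}

/* Consumer: ipi_handler_int1 drains in this order until count hits zero */
static int ring_pop(struct ring *q, int *v)
{
	if (!q->count)
		return -1;
	*v = q->slot[q->head];
	q->head = (q->head + 1) % QLEN;
	q->count--;
	return 0;
}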
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 627e04b5ba9..dfd304a4a3e 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -704,18 +704,18 @@ int sram_free_with_lsl(const void *addr)
{
struct sram_list_struct *lsl, **tmp;
struct mm_struct *mm = current->mm;
+ int ret = -1;
for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
- if ((*tmp)->addr == addr)
- goto found;
- return -1;
-found:
- lsl = *tmp;
- sram_free(addr);
- *tmp = lsl->next;
- kfree(lsl);
+ if ((*tmp)->addr == addr) {
+ lsl = *tmp;
+ ret = sram_free(addr);
+ *tmp = lsl->next;
+ kfree(lsl);
+ break;
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);
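The rewritten loop keeps the pointer-to-pointer idiom: *tmp always names the link that points at the current node, so unlinking is a single assignment with no special case for the list head, and the goto disappears. The idiom in isolation (struct node and free() stand in for the kernel's types):

#include <stdlib.h>

struct node {
	const void *addr;
	struct node *next;
};

/* Remove the first node whose addr matches; -1 if none found */
static int remove_by_addr(struct node **head, const void *addr)
{
	struct node **tmp, *victim;

	for (tmp = head; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			victim = *tmp;
			*tmp = victim->next;	/* same code for head or interior */
			free(victim);		/* kfree() in the kernel version */
			return 0;
		}
	return -1;
}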
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e66e25c4b0b..012e377330c 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -23,8 +23,4 @@ config HEART_BEAT
This option turns on/off heart beat kernel functionality.
First GPIO node is taken.
-config DEBUG_BOOTMEM
- depends on DEBUG_KERNEL
- bool "Debug BOOTMEM initialization"
-
endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 15f1f1d1840..6f432e6df9a 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -17,7 +17,7 @@ export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
# The various CONFIG_XILINX cpu features options are integers 0/1/2...
# rather than bools y/n
-# Work out HW multipler support. This is icky.
+# Work out HW multiplier support. This is tricky.
# 1. Spartan2 has no HW multipliers.
# 2. MicroBlaze v3.x always uses them, except in Spartan 2
# 3. All other FPGA/CPU ver combos, we can trust the CONFIG_ settings
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 8b422b12ef7..ab8fbe7ad90 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -66,5 +66,4 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_BOOTMEM=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 37db96a15b4..a10bec62e85 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -1,9 +1,9 @@
/*
* Support for the MicroBlaze PVR (Processor Version Register)
*
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007 John Williams <john.williams@petalogix.com>
- * Copyright (C) 2007 - 2009 PetaLogix
+ * Copyright (C) 2007 - 2011 PetaLogix
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -46,11 +46,11 @@ struct pvr_s {
#define PVR2_I_LMB_MASK 0x10000000
#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
-#define PVR2_D_PLB_MASK 0x02000000 /* new */
-#define PVR2_I_PLB_MASK 0x01000000 /* new */
-#define PVR2_INTERCONNECT 0x00800000 /* new */
-#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
-#define PVR2_USE_FSL_EXC 0x00040000 /* new */
+#define PVR2_D_PLB_MASK 0x02000000 /* new */
+#define PVR2_I_PLB_MASK 0x01000000 /* new */
+#define PVR2_INTERCONNECT 0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
+#define PVR2_USE_FSL_EXC 0x00040000 /* new */
#define PVR2_USE_MSR_INSTR 0x00020000
#define PVR2_USE_PCMP_INSTR 0x00010000
#define PVR2_AREA_OPTIMISED 0x00008000
@@ -59,7 +59,7 @@ struct pvr_s {
#define PVR2_USE_HW_MUL_MASK 0x00001000
#define PVR2_USE_FPU_MASK 0x00000800
#define PVR2_USE_MUL64_MASK 0x00000400
-#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
+#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
#define PVR2_USE_IPLBEXC 0x00000100
#define PVR2_USE_DPLBEXC 0x00000080
#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
@@ -122,96 +122,103 @@ struct pvr_s {
/* PVR access macros */
-#define PVR_IS_FULL(pvr) (pvr.pvr[0] & PVR0_PVR_FULL_MASK)
-#define PVR_USE_BARREL(pvr) (pvr.pvr[0] & PVR0_USE_BARREL_MASK)
-#define PVR_USE_DIV(pvr) (pvr.pvr[0] & PVR0_USE_DIV_MASK)
-#define PVR_USE_HW_MUL(pvr) (pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
-#define PVR_USE_FPU(pvr) (pvr.pvr[0] & PVR0_USE_FPU_MASK)
-#define PVR_USE_FPU2(pvr) (pvr.pvr[2] & PVR2_USE_FPU2_MASK)
-#define PVR_USE_ICACHE(pvr) (pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
-#define PVR_USE_DCACHE(pvr) (pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
-#define PVR_VERSION(pvr) ((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
-#define PVR_USER1(pvr) (pvr.pvr[0] & PVR0_USER1_MASK)
-#define PVR_USER2(pvr) (pvr.pvr[1] & PVR1_USER2_MASK)
-
-#define PVR_D_OPB(pvr) (pvr.pvr[2] & PVR2_D_OPB_MASK)
-#define PVR_D_LMB(pvr) (pvr.pvr[2] & PVR2_D_LMB_MASK)
-#define PVR_I_OPB(pvr) (pvr.pvr[2] & PVR2_I_OPB_MASK)
-#define PVR_I_LMB(pvr) (pvr.pvr[2] & PVR2_I_LMB_MASK)
-#define PVR_INTERRUPT_IS_EDGE(pvr) \
- (pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
-#define PVR_EDGE_IS_POSITIVE(pvr) \
- (pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
-#define PVR_USE_MSR_INSTR(pvr) (pvr.pvr[2] & PVR2_USE_MSR_INSTR)
-#define PVR_USE_PCMP_INSTR(pvr) (pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
-#define PVR_AREA_OPTIMISED(pvr) (pvr.pvr[2] & PVR2_AREA_OPTIMISED)
-#define PVR_USE_MUL64(pvr) (pvr.pvr[2] & PVR2_USE_MUL64_MASK)
-#define PVR_OPCODE_0x0_ILLEGAL(pvr) \
- (pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
-#define PVR_UNALIGNED_EXCEPTION(pvr) \
- (pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
-#define PVR_ILL_OPCODE_EXCEPTION(pvr) \
- (pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
-#define PVR_IOPB_BUS_EXCEPTION(pvr) \
- (pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
-#define PVR_DOPB_BUS_EXCEPTION(pvr) \
- (pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
-#define PVR_DIV_ZERO_EXCEPTION(pvr) \
- (pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
-#define PVR_FPU_EXCEPTION(pvr) (pvr.pvr[2] & PVR2_FPU_EXC_MASK)
-#define PVR_FSL_EXCEPTION(pvr) (pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
-
-#define PVR_DEBUG_ENABLED(pvr) (pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
-#define PVR_NUMBER_OF_PC_BRK(pvr) \
- ((pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
-#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr) \
- ((pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
-#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr) \
- ((pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
-#define PVR_FSL_LINKS(pvr) ((pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
-
-#define PVR_ICACHE_ADDR_TAG_BITS(pvr) \
- ((pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_ICACHE_USE_FSL(pvr) (pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
-#define PVR_ICACHE_ALLOW_WR(pvr) (pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
-#define PVR_ICACHE_LINE_LEN(pvr) \
- (1 << ((pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
-#define PVR_ICACHE_BYTE_SIZE(pvr) \
- (1 << ((pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
-
-#define PVR_DCACHE_ADDR_TAG_BITS(pvr) \
- ((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
-#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+#define PVR_IS_FULL(_pvr) (_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
+#define PVR_USE_BARREL(_pvr) (_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
+#define PVR_USE_DIV(_pvr) (_pvr.pvr[0] & PVR0_USE_DIV_MASK)
+#define PVR_USE_HW_MUL(_pvr) (_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
+#define PVR_USE_FPU(_pvr) (_pvr.pvr[0] & PVR0_USE_FPU_MASK)
+#define PVR_USE_FPU2(_pvr) (_pvr.pvr[2] & PVR2_USE_FPU2_MASK)
+#define PVR_USE_ICACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
+#define PVR_USE_DCACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
+#define PVR_VERSION(_pvr) ((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
+#define PVR_USER1(_pvr) (_pvr.pvr[0] & PVR0_USER1_MASK)
+#define PVR_USER2(_pvr) (_pvr.pvr[1] & PVR1_USER2_MASK)
+
+#define PVR_D_OPB(_pvr) (_pvr.pvr[2] & PVR2_D_OPB_MASK)
+#define PVR_D_LMB(_pvr) (_pvr.pvr[2] & PVR2_D_LMB_MASK)
+#define PVR_I_OPB(_pvr) (_pvr.pvr[2] & PVR2_I_OPB_MASK)
+#define PVR_I_LMB(_pvr) (_pvr.pvr[2] & PVR2_I_LMB_MASK)
+#define PVR_INTERRUPT_IS_EDGE(_pvr) \
+ (_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
+#define PVR_EDGE_IS_POSITIVE(_pvr) \
+ (_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
+#define PVR_USE_MSR_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_MSR_INSTR)
+#define PVR_USE_PCMP_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
+#define PVR_AREA_OPTIMISED(_pvr) (_pvr.pvr[2] & PVR2_AREA_OPTIMISED)
+#define PVR_USE_MUL64(_pvr) (_pvr.pvr[2] & PVR2_USE_MUL64_MASK)
+#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \
+ (_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
+#define PVR_UNALIGNED_EXCEPTION(_pvr) \
+ (_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
+#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \
+ (_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
+#define PVR_IOPB_BUS_EXCEPTION(_pvr) \
+ (_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
+#define PVR_DOPB_BUS_EXCEPTION(_pvr) \
+ (_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
+#define PVR_DIV_ZERO_EXCEPTION(_pvr) \
+ (_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
+#define PVR_FPU_EXCEPTION(_pvr) (_pvr.pvr[2] & PVR2_FPU_EXC_MASK)
+#define PVR_FSL_EXCEPTION(_pvr) (_pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
+
+#define PVR_DEBUG_ENABLED(_pvr) (_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
+#define PVR_NUMBER_OF_PC_BRK(_pvr) \
+ ((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
+#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \
+ ((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
+#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \
+ ((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
+#define PVR_FSL_LINKS(_pvr) ((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
+
+#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \
+ ((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_ICACHE_USE_FSL(_pvr) \
+ (_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
+#define PVR_ICACHE_ALLOW_WR(_pvr) \
+ (_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
+#define PVR_ICACHE_LINE_LEN(_pvr) \
+ (1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
+#define PVR_ICACHE_BYTE_SIZE(_pvr) \
+ (1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
+
+#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \
+ ((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_DCACHE_USE_FSL(_pvr) (_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
+#define PVR_DCACHE_ALLOW_WR(_pvr) \
+ (_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
/* FIXME: two shifts on one line need a comment */
-#define PVR_DCACHE_LINE_LEN(pvr) \
- (1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
-#define PVR_DCACHE_BYTE_SIZE(pvr) \
- (1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_LINE_LEN(_pvr) \
+ (1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
+#define PVR_DCACHE_BYTE_SIZE(_pvr) \
+ (1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
-#define PVR_DCACHE_USE_WRITEBACK(pvr) \
- ((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
+#define PVR_DCACHE_USE_WRITEBACK(_pvr) \
+ ((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
-#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
-#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_ICACHE_BASEADDR(_pvr) \
+ (_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
+#define PVR_ICACHE_HIGHADDR(_pvr) \
+ (_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_DCACHE_BASEADDR(_pvr) \
+ (_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
+#define PVR_DCACHE_HIGHADDR(_pvr) \
+ (_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
-#define PVR_DCACHE_BASEADDR(pvr) (pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
-#define PVR_DCACHE_HIGHADDR(pvr) (pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
+#define PVR_TARGET_FAMILY(_pvr) \
+ ((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
-#define PVR_TARGET_FAMILY(pvr) ((pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
-
-#define PVR_MSR_RESET_VALUE(pvr) \
- (pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
+#define PVR_MSR_RESET_VALUE(_pvr) \
+ (_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
/* mmu */
-#define PVR_USE_MMU(pvr) ((pvr.pvr[11] & PVR11_USE_MMU) >> 30)
-#define PVR_MMU_ITLB_SIZE(pvr) (pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
-#define PVR_MMU_DTLB_SIZE(pvr) (pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
-#define PVR_MMU_TLB_ACCESS(pvr) (pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
-#define PVR_MMU_ZONES(pvr) (pvr.pvr[11] & PVR11_MMU_ZONES)
+#define PVR_USE_MMU(_pvr) ((_pvr.pvr[11] & PVR11_USE_MMU) >> 30)
+#define PVR_MMU_ITLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
+#define PVR_MMU_DTLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
+#define PVR_MMU_TLB_ACCESS(_pvr) (_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
+#define PVR_MMU_ZONES(_pvr) (_pvr.pvr[11] & PVR11_MMU_ZONES)
/* endian */
-#define PVR_ENDIAN(pvr) (pvr.pvr[0] & PVR0_ENDI)
+#define PVR_ENDIAN(_pvr) (_pvr.pvr[0] & PVR0_ENDI)
int cpu_has_pvr(void);
void get_pvr(struct pvr_s *pvr);
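Renaming the macro parameter from pvr to _pvr is not cosmetic: in the old bodies every pvr token was a parameter occurrence, including the struct member in pvr.pvr[0], so the macros expanded correctly only when the caller's variable happened to be named pvr. A contrived illustration of the hazard:

/* Old spelling: both 'pvr' tokens in the body are the parameter */
#define OLD_PVR_VERSION(pvr)	((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
/* OLD_PVR_VERSION(info) -> ((info.info[0] & ...) >> 8)  -- broken */

/* New spelling: only '_pvr' is the parameter, the member name survives */
#define NEW_PVR_VERSION(_pvr)	((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
/* NEW_PVR_VERSION(info) -> ((info.pvr[0] & ...) >> 8)   -- correct */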
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 87c79fa275c..2c309fccf23 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -32,6 +32,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.30.a", 0x10},
{"7.30.b", 0x11},
{"8.00.a", 0x12},
+ {"8.00.b", 0x13},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 819238b8a42..41c30cdb270 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -287,25 +287,44 @@
* are masked. This is nice, means we don't have to CLI before state save
*/
C_ENTRY(_user_exception):
- addi r14, r14, 4 /* return address is 4 byte after call */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ addi r14, r14, 4 /* return address is 4 byte after call */
+
+ mfs r1, rmsr
+ nop
+ andi r1, r1, MSR_UMS
+ bnei r1, 1f
+
+/* Kernel-mode state save - kernel execve */
+ lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+ tophys(r1,r1);
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+ SAVE_REGS
+ swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
+ brid 2f;
+ nop; /* Fill delay slot */
+
+/* User-mode state save. */
+1:
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
- /* MS these three instructions can be added to one */
- /* addik r1, r1, THREAD_SIZE; */
- /* tophys(r1,r1); */
- /* addik r1, r1, -STATE_SAVE_SIZE; */
- addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
+/* calculate the kernel stack pointer from the task struct (THREAD_SIZE = 8k) */
+ addik r1, r1, THREAD_SIZE;
+ tophys(r1,r1);
+
+ addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS
swi r0, r1, PTO + PT_R3
swi r0, r1, PTO + PT_R4
+ swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
clear_ums;
- lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
@@ -375,6 +394,9 @@ C_ENTRY(ret_from_trap):
swi r3, r1, PTO + PT_R3
swi r4, r1, PTO + PT_R4
+ lwi r11, r1, PTO + PT_MODE;
+/* See if we are returning to kernel mode; if so, skip resched etc. */
+ bnei r11, 2f;
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* FIXME: Restructure all these flag checks. */
@@ -417,6 +439,16 @@ C_ENTRY(ret_from_trap):
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+ bri 6f;
+
+/* Return to kernel state. */
+2: set_bip; /* Ints masked for state restore */
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS;
+ addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
+ tovirt(r1,r1);
+6:
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 478f2943ede..a7fa6ae76d8 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -25,6 +25,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>
+#include <asm/cacheflush.h>
#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
#define MICROBLAZE_IBUS_EXCEPTION 0x03
@@ -52,6 +53,8 @@ void die(const char *str, struct pt_regs *fp, long err)
void sw_exception(struct pt_regs *regs)
{
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+ flush_dcache_range(regs->r16, regs->r16 + 0x4);
+ flush_icache_range(regs->r16, regs->r16 + 0x4);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 781195438ee..25f6e07d8de 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -945,11 +945,20 @@ store3: sbi r3, r4, 2;
store4: sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+ lbui r3, r5, 0;
+store5: sbi r3, r4, 0;
+ lbui r3, r5, 1;
+ brid ret_from_exc;
+store6: sbi r3, r4, 1; /* Delay slot */
+#else
lbui r3, r5, 2;
store5: sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
store6: sbi r3, r4, 1; /* Delay slot */
+#endif
+
ex_sw_end_vm: /* Exception handling of store word, ends. */
/* We have to prevent cases that get/put_user macros get unaligned pointer
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a105301e2b7..c881393f07f 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -61,14 +61,12 @@ static int __init early_init_dt_scan_serial(unsigned long node,
char *p;
int *addr;
- pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+ pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
/* find all serial nodes */
if (strncmp(uname, "serial", 6) != 0)
return 0;
- early_init_dt_check_for_initrd(node);
-
/* find compatible node with uartlite */
p = of_get_flat_dt_prop(node, "compatible", &l);
if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 96a88c31fe4..3451bdec9f0 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -123,20 +123,10 @@ SECTIONS {
__init_end_before_initramfs = .;
- .init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- . = ALIGN(4);
- LONG(0);
-/*
- * FIXME this can break initramfs for MMU.
- * Pad init.ramfs up to page boundary,
- * so that __init_end == __bss_start. This will make image.elf
- * consistent with the image.bin
- */
- /* . = ALIGN(PAGE_SIZE); */
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+ INIT_RAM_FS
}
+
__init_end = .;
.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 123e3616f2d..810fd68775e 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -182,7 +182,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
for (; c >= 4; c -= 4) {
value = *--i_src;
*--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
- buf_hold = (value & 0xFFFFFF) << 8;;
+ buf_hold = (value & 0xFFFFFF) << 8;
}
#endif
/* Realign the source */
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
deleted file mode 100644
index ceeaa8c407f..00000000000
--- a/arch/microblaze/lib/muldi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <linux/linkage.h>
-
-/*
- * Multiply operation for 64 bit integers, for devices with hard multiply
- * Input : Operand1[H] in Reg r5
- * Operand1[L] in Reg r6
- * Operand2[H] in Reg r7
- * Operand2[L] in Reg r8
- * Output: Result[H] in Reg r3
- * Result[L] in Reg r4
- *
- * Explaination:
- *
- * Both the input numbers are divided into 16 bit number as follows
- * op1 = A B C D
- * op2 = E F G H
- * result = D * H
- * + (C * H + D * G) << 16
- * + (B * H + C * G + D * F) << 32
- * + (A * H + B * G + C * F + D * E) << 48
- *
- * Only 64 bits of the output are considered
- */
-
- .text
- .globl __muldi3
- .type __muldi3, @function
- .ent __muldi3
-
-__muldi3:
- addi r1, r1, -40
-
-/* Save the input operands on the caller's stack */
- swi r5, r1, 44
- swi r6, r1, 48
- swi r7, r1, 52
- swi r8, r1, 56
-
-/* Store all the callee saved registers */
- sw r20, r1, r0
- swi r21, r1, 4
- swi r22, r1, 8
- swi r23, r1, 12
- swi r24, r1, 16
- swi r25, r1, 20
- swi r26, r1, 24
- swi r27, r1, 28
-
-/* Load all the 16 bit values for A thru H */
- lhui r20, r1, 44 /* A */
- lhui r21, r1, 46 /* B */
- lhui r22, r1, 48 /* C */
- lhui r23, r1, 50 /* D */
- lhui r24, r1, 52 /* E */
- lhui r25, r1, 54 /* F */
- lhui r26, r1, 56 /* G */
- lhui r27, r1, 58 /* H */
-
-/* D * H ==> LSB of the result on stack ==> Store1 */
- mul r9, r23, r27
- swi r9, r1, 36 /* Pos2 and Pos3 */
-
-/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
-/* Store the carry generated in position 2 for Pos 3 */
- lhui r11, r1, 36 /* Pos2 */
- mul r9, r22, r27 /* C * H */
- mul r10, r23, r26 /* D * G */
- add r9, r9, r10
- addc r12, r0, r0
- add r9, r9, r11
- addc r12, r12, r0 /* Store the Carry */
- shi r9, r1, 36 /* Store Pos2 */
- swi r9, r1, 32
- lhui r11, r1, 32
- shi r11, r1, 34 /* Store Pos1 */
-
-/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
- mul r9, r21, r27 /* B * H */
- mul r10, r22, r26 /* C * G */
- mul r7, r23, r25 /* D * F */
- add r9, r9, r11
- add r9, r9, r10
- add r9, r9, r7
- swi r9, r1, 32 /* Pos0 and Pos1 */
-
-/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
- lhui r11, r1, 32 /* Pos0 */
- mul r9, r20, r27 /* A * H */
- mul r10, r21, r26 /* B * G */
- mul r7, r22, r25 /* C * F */
- mul r8, r23, r24 /* D * E */
- add r9, r9, r11
- add r9, r9, r10
- add r9, r9, r7
- add r9, r9, r8
- sext16 r9, r9 /* Sign extend the MSB */
- shi r9, r1, 32
-
-/* Move results to r3 and r4 */
- lhui r3, r1, 32
- add r3, r3, r12
- shi r3, r1, 32
- lwi r3, r1, 32 /* Hi Part */
- lwi r4, r1, 36 /* Lo Part */
-
-/* Restore Callee saved registers */
- lw r20, r1, r0
- lwi r21, r1, 4
- lwi r22, r1, 8
- lwi r23, r1, 12
- lwi r24, r1, 16
- lwi r25, r1, 20
- lwi r26, r1, 24
- lwi r27, r1, 28
-
-/* Restore Frame and return */
- rtsd r15, 8
- addi r1, r1, 40
-
-.size __muldi3, . - __muldi3
-.end __muldi3
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
new file mode 100644
index 00000000000..d4860e154d2
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.c
@@ -0,0 +1,60 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+#define DWtype long long
+#define UWtype unsigned long
+#define UHWtype unsigned short
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __x0, __x1, __x2, __x3; \
+ UHWtype __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart(u); \
+ __uh = __ll_highpart(u); \
+ __vl = __ll_lowpart(v); \
+ __vh = __ll_highpart(v); \
+ \
+ __x0 = (UWtype) __ul * __vl; \
+ __x1 = (UWtype) __ul * __vh; \
+ __x2 = (UWtype) __uh * __vl; \
+ __x3 = (UWtype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0); /* this can't give carry */\
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos */ \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+ } while (0)
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({ \
+ DWunion __w; \
+ umul_ppmm(__w.s.high, __w.s.low, u, v); \
+ __w.ll; \
+ })
+#endif
+
+DWtype __muldi3(DWtype u, DWtype v)
+{
+ const DWunion uu = {.ll = u};
+ const DWunion vv = {.ll = v};
+ DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+ w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+ + (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+ return w.ll;
+}
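The C replacement follows the classic libgcc scheme: umul_ppmm splits each 32-bit operand into 16-bit halves so all four partial products fit in 32 bits, __umulsidi3 assembles the unsigned 64-bit low-by-low product, and the two cross terms are folded into the high word (DWunion is assumed to come from libgcc.h as the usual {long long; low/high} overlay). A smoke test one might run on a 32-bit MicroBlaze build where this file provides __muldi3:

#include <assert.h>

long long __muldi3(long long u, long long v);

int main(void)
{
	assert(__muldi3(0x12345678LL, -3LL) == 0x12345678LL * -3LL);
	assert(__muldi3(1LL << 40, 1LL << 10) == 1LL << 50);
	assert(__muldi3(-7LL, -9LL) == 63LL);
	return 0;
}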
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 48fb4790bfe..959f38ccb9a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -20,6 +20,9 @@ config WORD_SIZE
config ARCH_PHYS_ADDR_T_64BIT
def_bool PPC64 || PHYS_64BIT
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool ARCH_PHYS_ADDR_T_64BIT
+
config MMU
bool
default y
@@ -209,7 +212,7 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
def_bool y
depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
- PPC_85xx || PPC_86xx || PPC_PSERIES
+ PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
config PPC_DCR_NATIVE
bool
@@ -595,13 +598,11 @@ config EXTRA_TARGETS
If unsure, leave blank
-if !44x || BROKEN
config ARCH_WANTS_FREEZER_CONTROL
def_bool y
depends on ADB_PMU
source kernel/power/Kconfig
-endif
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
@@ -682,6 +683,15 @@ config FSL_PMC
Freescale MPC85xx/MPC86xx power management controller support
(suspend/resume). For MPC83xx see platforms/83xx/suspend.c
+config PPC4xx_CPM
+ bool
+ default y
+ depends on SUSPEND && (44x || 40x)
+ help
+ PPC4xx Clock Power Management (CPM) support (suspend/resume).
+ It also enables support for two different idle states (idle-wait
+ and idle-doze).
+
config 4xx_SOC
bool
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a3037039625..5b27a4b74b7 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@
dcr-reg = <0x00c 0x002>;
};
+ CPM0: cpm {
+ compatible = "ibm,cpm";
+ dcr-access-method = "native";
+ dcr-reg = <0x160 0x003>;
+ unused-units = <0x00000100>;
+ idle-doze = <0x02000000>;
+ standby = <0xfeff791d>;
+ };
+
L2C0: l2c {
compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */
@@ -270,28 +279,6 @@
interrupts = <0x1 0x4>;
};
- UART2: serial@ef600500 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600500 0x00000008>;
- virtual-reg = <0xef600500>;
- clock-frequency = <0>; /* Filled in by U-Boot */
- current-speed = <0>; /* Filled in by U-Boot */
- interrupt-parent = <&UIC1>;
- interrupts = <28 0x4>;
- };
-
- UART3: serial@ef600600 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600600 0x00000008>;
- virtual-reg = <0xef600600>;
- clock-frequency = <0>; /* Filled in by U-Boot */
- current-speed = <0>; /* Filled in by U-Boot */
- interrupt-parent = <&UIC1>;
- interrupts = <29 0x4>;
- };
-
IIC0: i2c@ef600700 {
compatible = "ibm,iic-460ex", "ibm,iic";
reg = <0xef600700 0x00000014>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68eeaca..89edb16649c 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@
interrupt-parent = <&UIC0>;
};
+ CPM0: cpm {
+ compatible = "ibm,cpm";
+ dcr-access-method = "native";
+ dcr-reg = <0x0b0 0x003>;
+ unused-units = <0x00000000>;
+ idle-doze = <0x02000000>;
+ standby = <0xe3e74800>;
+ };
+
plb {
compatible = "ibm,plb-405ex", "ibm,plb4";
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 05a76ccfd49..697b3f6b78b 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -297,6 +297,14 @@
interrupt-parent = < &ipic >;
};
+ dma@2c000 {
+ compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+ reg = <0x2c000 0x1800>;
+ interrupts = <3 0x8
+ 94 0x8>;
+ interrupt-parent = < &ipic >;
+ };
+
};
pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a97eb2db5a1..d3db02f98dd 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -265,6 +265,14 @@
interrupt-parent = < &ipic >;
};
+ dma@2c000 {
+ compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+ reg = <0x2c000 0x1800>;
+ interrupts = <3 0x8
+ 94 0x8>;
+ interrupt-parent = < &ipic >;
+ };
+
};
pci0: pcie@e0009000 {
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 4e19ee7ce4e..34b8c1a1e75 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -12,6 +12,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KILAUEA=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
# CONFIG_WALNUT is not set
CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
@@ -42,6 +44,9 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 45c64d818b2..17e4dd98eed 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -42,6 +42,9 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae2d09..8a7e9314c68 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -267,7 +267,16 @@ static __inline__ int fls64(__u64 x)
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
#include <asm-generic/bitops/hweight.h>
+#endif
+
#include <asm-generic/bitops/find.h>
/* Little-endian versions */
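On 64-bit PowerPC the commit reroutes hweight through out-of-line __arch_hweight* helpers (implemented elsewhere to use the popcntb/popcntd instructions when the corresponding CPU_FTR_POPCNT* bit is set), while other configs keep the generic code. For reference, the standard bit-parallel fallback those helpers compete against looks like this; a generic sketch, not the kernel file verbatim:

static inline unsigned int sw_hweight32(unsigned int w)
{
	w -= (w >> 1) & 0x55555555;			/* 2-bit sums */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;		/* 8-bit sums */
	return (w * 0x01010101) >> 24;			/* add the bytes */
}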
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd9cf0..f0a211d9692 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -199,6 +199,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@@ -403,21 +405,22 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
- CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_POPCNTB)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
- CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
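
Editor's note: POWER5 and POWER6 gain CPU_FTR_POPCNTB here, and POWER7 additionally gains CPU_FTR_POPCNTD; the feature-fixup sections in hweight_64.S below patch themselves based on exactly these bits. A hedged model of how such a feature word composes and is tested (the bit positions are stand-ins, not the kernel constants):

#include <stdint.h>

/* Stand-in feature bits; the kernel packs these into the cpu_spec. */
#define FTR_POPCNTB (1ULL << 58)
#define FTR_POPCNTD (1ULL << 59)
#define FTRS_POWER6 (FTR_POPCNTB)
#define FTRS_POWER7 (FTR_POPCNTB | FTR_POPCNTD)

static int has_feature(uint64_t cpu_features, uint64_t ftr)
{
        return (cpu_features & ftr) != 0;
}
/* has_feature(FTRS_POWER6, FTR_POPCNTD) == 0; a POWER7 mask has both. */
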
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e18447c62..f71bb4c118b 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@ static inline cpumask_t cpu_online_cores_map(void)
return cpu_thread_mask_to_cores(cpu_online_map);
}
-static inline int cpu_thread_to_core(int cpu)
-{
- return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
static inline int cpu_thread_in_core(int cpu)
{
return cpu & (threads_per_core - 1);
}
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
{
return cpu & ~(threads_per_core - 1);
}
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
{
return cpu | (threads_per_core - 1);
}
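
Editor's note: the renames make the thread/core arithmetic self-describing. threads_per_core is a power of two and threads_shift its log2, so sibling threads differ only in the bits below the shift. A small model of the arithmetic, including the identity that ties the new names together (constants assumed; the kernel derives them from the device tree):

/* Assume 4 threads per core, so threads_shift == 2. */
#define THREADS_PER_CORE 4
#define THREADS_SHIFT    2

static int core_index_of_thread(int cpu)  { return cpu >> THREADS_SHIFT; }
static int first_thread_of_core(int core) { return core << THREADS_SHIFT; }
static int first_thread_sibling(int cpu)  { return cpu & ~(THREADS_PER_CORE - 1); }
static int last_thread_sibling(int cpu)   { return cpu | (THREADS_PER_CORE - 1); }

/*
 * For cpu 6: core 1, siblings 4..7, and
 * first_thread_of_core(core_index_of_thread(6)) == first_thread_sibling(6).
 */
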
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4fcbe..16d25c0974b 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
struct dma_map_ops;
struct device_node;
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a405d7..4ef662e4a31 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -46,6 +46,7 @@
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
#ifndef __ASSEMBLY__
@@ -59,7 +60,7 @@ enum {
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
- FW_FEATURE_CMO,
+ FW_FEATURE_CMO | FW_FEATURE_VPHN,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca58db5..ec089acfa56 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -232,7 +232,9 @@
#define H_GET_EM_PARMS 0x2B8
#define H_SET_MPP 0x2D0
#define H_GET_MPP 0x2D4
-#define MAX_HCALL_OPCODE H_GET_MPP
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_BEST_ENERGY 0x2F4
+#define MAX_HCALL_OPCODE H_BEST_ENERGY
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fefebb..380d48bacd1 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,10 @@ struct lppaca {
volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
u32 dsei_data; // DSEI data x24-x27
u64 sprg3; // SPRG3 value x28-x2F
- u8 reserved3[80]; // Reserved x30-x7F
+ u8 reserved3[40]; // Reserved x30-x57
+ volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
+ // associativity change counters x58-x5F
+ u8 reserved4[32]; // Reserved x60-x7F
//=============================================================================
// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b014553..8433d36619a 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -27,9 +27,7 @@ struct iommu_table;
struct rtc_time;
struct file;
struct pci_controller;
-#ifdef CONFIG_KEXEC
struct kimage;
-#endif
#ifdef CONFIG_SMP
struct smp_ops_t {
@@ -72,7 +70,7 @@ struct machdep_calls {
int psize, int ssize);
void (*flush_hash_range)(unsigned long number, int local);
- /* special for kexec, to be called in real mode, linar mapping is
+ /* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*hpte_clear_all)(void);
@@ -324,8 +322,6 @@ extern sys_ctrler_t sys_ctrler;
#endif /* CONFIG_PPC_PMAC */
-extern void setup_pci_ptrs(void);
-
#ifdef CONFIG_SMP
/* Poor default implementations */
extern void __devinit smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cbceb5..fd3fd58bad8 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,6 +33,9 @@ extern int numa_cpu_lookup_table[];
extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
#endif
/*
@@ -42,6 +45,8 @@ extern unsigned long max_pfn;
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NEED_MULTIPLE_NODES */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f2744..92efe67d1c5 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -10,31 +10,7 @@
#ifndef _ASM_POWERPC_NVRAM_H
#define _ASM_POWERPC_NVRAM_H
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0 0x74
-#define NVRAM_AS1 0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS 0x1FF9
-#define MOTO_RTC_MINUTES 0x1FFA
-#define MOTO_RTC_HOURS 0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
-#define MOTO_RTC_MONTH 0x1FFE
-#define MOTO_RTC_YEAR 0x1FFF
-#define MOTO_RTC_CONTROLA 0x1FF8
-#define MOTO_RTC_CONTROLB 0x1FF9
-
+/* Signatures for nvram partitions */
#define NVRAM_SIG_SP 0x02 /* support processor */
#define NVRAM_SIG_OF 0x50 /* open firmware config */
#define NVRAM_SIG_FW 0x51 /* general firmware */
@@ -49,32 +25,19 @@
#define NVRAM_SIG_OS 0xa0 /* OS defined */
#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
- unsigned char signature;
- unsigned char checksum;
- unsigned short length;
- char name[12];
-};
-
#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/list.h>
-struct nvram_partition {
- struct list_head partition;
- struct nvram_header header;
- unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
extern int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int err_seq);
extern int nvram_read_error_log(char * buff, int length,
unsigned int * err_type, unsigned int *err_seq);
extern int nvram_clear_error_log(void);
-
extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_MMIO_NVRAM
extern int mmio_nvram_init(void);
@@ -85,6 +48,13 @@ static inline int mmio_nvram_init(void)
}
#endif
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+ int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
#endif /* __KERNEL__ */
/* PowerMac specific nvram stuffs */
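
Editor's note: the header now exposes a general partition API in place of the old hard-wired error-log layout. A hedged usage sketch for a pseries-style caller: find an OS partition by name and signature, creating it when absent (the 2048/1024 byte sizes are illustrative; note that nvram_find_partition() returns 0, not a negative code, when nothing matches):

#include <linux/types.h>
#include <asm/nvram.h>

/* Sketch; assumes nvram_scan_partitions() already ran. */
static loff_t get_os_partition(int *size)
{
        loff_t index;

        index = nvram_find_partition("ppc64,linux", NVRAM_SIG_OS, size);
        if (index == 0) {       /* no match: carve one from free space */
                index = nvram_create_partition("ppc64,linux", NVRAM_SIG_OS,
                                               2048, 1024);
                if (index > 0)
                        *size = nvram_get_partition_size(index);
        }
        return index;
}
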
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b819e..1255569387b 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -36,6 +36,8 @@
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
+#define PPC_INST_POPCNTD 0x7c0003f4
+#define PPC_INST_POPCNTW 0x7c0002f4
#define PPC_INST_RFCI 0x4c000066
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
@@ -88,6 +90,12 @@
__PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
__PPC_RB(b))
+#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
+ __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
+ __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \
+ __PPC_RA(a) | __PPC_RS(s))
#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
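
Editor's note: the PPC_POPCNTB/W/D macros splice register fields into a base opcode so the instructions can be emitted as raw .long words on assemblers that predate them. A worked encoding, assuming the conventional field placements (__PPC_RS at bit 21 and __PPC_RA at bit 16 of the instruction word):

#include <stdint.h>
#include <stdio.h>

#define RS(s) (((uint32_t)(s) & 0x1f) << 21)   /* source register field */
#define RA(a) (((uint32_t)(a) & 0x1f) << 16)   /* destination field */
#define INST_POPCNTD 0x7c0003f4u

int main(void)
{
        /* popcntd r3,r3 encodes as 0x7c6303f4 */
        printf("0x%08x\n", INST_POPCNTD | RA(3) | RS(3));
        return 0;
}
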
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187ba02..de1967a1ff5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -122,7 +122,6 @@ extern struct task_struct *last_task_used_spe;
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
-#ifdef __KERNEL__
#ifdef __powerpc64__
#define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,7 +138,6 @@ extern struct task_struct *last_task_used_spe;
#define STACK_TOP_MAX STACK_TOP
#endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
typedef struct {
unsigned long seg;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa65c3..7ef0d90defc 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -106,9 +106,22 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
int nid)
{
}
-
#endif /* CONFIG_NUMA */
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+#else
+static inline int start_topology_update(void)
+{
+ return 0;
+}
+static inline int stop_topology_update(void)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
#include <asm-generic/topology.h>
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5319b..25e39220e89 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -116,9 +116,7 @@ struct vdso_data {
#endif /* CONFIG_PPC64 */
-#ifdef __KERNEL__
extern struct vdso_data *vdso_data;
-#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 36c30f31ec9..3bb2a3e6a33 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,8 +29,10 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
- signal.o sysfs.o cacheinfo.o
-obj-y += vdso32/
+ signal.o sysfs.o cacheinfo.o time.o \
+ prom.o traps.o setup-common.o \
+ udbg.o misc.o io.o dma.o \
+ misc_$(CONFIG_WORD_SIZE).o vdso32/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
@@ -80,9 +82,6 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-y += time.o prom.o traps.o setup-common.o \
- udbg.o misc.o io.o dma.o \
- misc_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bd0df2e6aa8..23e6a93145a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -209,7 +209,6 @@ int main(void)
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
/* Interrupt register frame */
- DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 96a908f1cd8..be5ab18b03b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -457,16 +457,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_power7,
- .cpu_restore = __restore_cpu_power7,
.oprofile_cpu_type = "ppc64/power7",
.oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
- .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
- .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
- POWER6_MMCRA_OTHER,
.platform = "power7",
},
+ { /* Power7+ */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004A0000,
+ .cpu_name = "POWER7+ (raw)",
+ .cpu_features = CPU_FTRS_POWER7,
+ .cpu_user_features = COMMON_USER_POWER7,
+ .mmu_features = MMU_FTR_HPTE_TABLE |
+ MMU_FTR_TLBIE_206,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power7",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "power7+",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 8e05c16344e..0a2af50243c 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
+#include <asm/rtas.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -141,3 +142,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
return csize;
}
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * The crashkernel region will almost always overlap the RTAS region, so
+ * we have to be careful when shrinking the crashkernel region.
+ */
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+ const u32 *basep, *sizep;
+ unsigned int rtas_start = 0, rtas_end = 0;
+
+ basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
+ sizep = of_get_property(rtas.dev, "rtas-size", NULL);
+
+ if (basep && sizep) {
+ rtas_start = *basep;
+ rtas_end = *basep + *sizep;
+ }
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ /* Does this page overlap with the RTAS region? */
+ if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
+ continue;
+
+ ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+ init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+ free_page((unsigned long)__va(addr));
+ totalram_pages++;
+ }
+}
+#endif
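
Editor's note: the skip test above is a plain interval-intersection check. The page [addr, addr + PAGE_SIZE) touches the RTAS region [rtas_start, rtas_end) when addr <= rtas_end and addr + PAGE_SIZE > rtas_start (the <= is conservative, also leaving reserved the page that starts exactly at rtas_end). The same test in isolation:

#include <stdbool.h>

/* Does [a0, a1) intersect [b0, b1)? Half-open interval test. */
static bool ranges_overlap(unsigned long a0, unsigned long a1,
                           unsigned long b0, unsigned long b1)
{
        return a0 < b1 && a1 > b0;
}
/* A 4 KB page at 0x3000 vs RTAS at [0x2000, 0x4000): overlaps, skip it. */
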
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 6e54a0fd31a..e7554154a6d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -19,7 +19,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
- dma_handle, device_to_mask(dev), flag,
+ dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ed4aeb96398..c22dc1ec1c9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,6 +31,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
+#include <asm/ptrace.h>
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f8b01d6466..8a817995b4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
*/
#include <asm/exception-64s.h>
+#include <asm/ptrace.h>
/*
* We layout physical memory as follows:
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e86c040ae58..de369558bf0 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
#ifdef CONFIG_VSX
#define REST_32FPVSRS(n,c,base) \
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8278e8bad5a..9dd21a8c4d5 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -40,6 +40,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b40a8..cbb3436b592 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
#include <asm/synch.h>
#include "head_booke.h"
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f0dd577e4a5..782f23df7c8 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/ptrace.h>
/* The physical memory is layed out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -96,7 +97,7 @@ __secondary_hold_acknowledge:
.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
* is used by kexec-tools to keep the kdump kernel in the
@@ -384,12 +385,10 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
-#ifdef CONFIG_CRASH_DUMP
lwz r7,__run_at_load-_stext(r26)
- cmplwi cr0,r7,1 /* kdump kernel ? - stay where we are */
+ cmplwi cr0,r7,1 /* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
-#endif
1: mr r3,r25
bl .relocate
#endif
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1f1a04b5c2a..1cbf64e6b41 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 529b817f473..3e02710d956 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -41,6 +41,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
+#include <asm/ptrace.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d5839179ec7..961bb03413f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -311,8 +311,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
if (printk_ratelimit())
- printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
- " npages %lx\n", tbl, vaddr, npages);
+ dev_info(dev, "iommu_alloc failed, tbl %p "
+ "vaddr %lx npages %lu\n", tbl, vaddr,
+ npages);
goto failure;
}
@@ -579,9 +580,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
attrs);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
- printk(KERN_INFO "iommu_alloc failed, "
- "tbl %p vaddr %p npages %d\n",
- tbl, vaddr, npages);
+ dev_info(dev, "iommu_alloc failed, tbl %p "
+ "vaddr %p npages %d\n", tbl, vaddr,
+ npages);
}
} else
dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
@@ -627,7 +628,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
* the tce tables.
*/
if (order >= IOMAP_MAX_ORDER) {
- printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+ dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
+ size);
return NULL;
}
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752cbe1..b69463ec201 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -122,8 +122,3 @@ _GLOBAL(longjmp)
mtlr r0
mr r3,r4
blr
-
-_GLOBAL(__setup_cpu_power7)
-_GLOBAL(__restore_cpu_power7)
- /* place holder */
- blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index a7a570dcdd5..094bd9821ad 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -30,6 +30,7 @@
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
+#include <asm/ptrace.h>
.text
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e5144906a56..206a321a71d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
+#include <asm/ptrace.h>
.text
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 9cf197f01e9..bb12b3248f1 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,15 +34,26 @@
#undef DEBUG_NVRAM
-static struct nvram_partition * nvram_part;
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
+#define NVRAM_HEADER_LEN sizeof(struct nvram_header)
+#define NVRAM_BLOCK_LEN NVRAM_HEADER_LEN
+
+/* If you change this size, then change the size of NVNAME_LEN */
+struct nvram_header {
+ unsigned char signature;
+ unsigned char checksum;
+ unsigned short length;
+ /* Terminating null required only for names < 12 chars. */
+ char name[12];
+};
-struct err_log_info {
- int error_type;
- unsigned int seq_num;
+struct nvram_partition {
+ struct list_head partition;
+ struct nvram_header header;
+ unsigned int index;
};
+static LIST_HEAD(nvram_partitions);
+
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
{
int size;
@@ -186,14 +197,12 @@ static struct miscdevice nvram_dev = {
#ifdef DEBUG_NVRAM
static void __init nvram_print_partitions(char * label)
{
- struct list_head * p;
struct nvram_partition * tmp_part;
printk(KERN_WARNING "--------%s---------\n", label);
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
- list_for_each(p, &nvram_part->partition) {
- tmp_part = list_entry(p, struct nvram_partition, partition);
- printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
+ list_for_each_entry(tmp_part, &nvram_partitions, partition) {
+ printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
@@ -228,95 +237,113 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
return c_sum;
}
-static int __init nvram_remove_os_partition(void)
+/**
+ * nvram_remove_partition - Remove one or more partitions in nvram
+ * @name: name of the partition to remove, or NULL for a
+ * signature only match
+ * @sig: signature of the partition(s) to remove
+ */
+
+int __init nvram_remove_partition(const char *name, int sig)
{
- struct list_head *i;
- struct list_head *j;
- struct nvram_partition * part;
- struct nvram_partition * cur_part;
+ struct nvram_partition *part, *prev, *tmp;
int rc;
- list_for_each(i, &nvram_part->partition) {
- part = list_entry(i, struct nvram_partition, partition);
- if (part->header.signature != NVRAM_SIG_OS)
+ list_for_each_entry(part, &nvram_partitions, partition) {
+ if (part->header.signature != sig)
continue;
-
- /* Make os partition a free partition */
+ if (name && strncmp(name, part->header.name, 12))
+ continue;
+
+ /* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
- sprintf(part->header.name, "wwwwwwwwwwww");
+ strncpy(part->header.name, "wwwwwwwwwwww", 12);
part->header.checksum = nvram_checksum(&part->header);
-
- /* Merge contiguous free partitions backwards */
- list_for_each_prev(j, &part->partition) {
- cur_part = list_entry(j, struct nvram_partition, partition);
- if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
- break;
- }
-
- part->header.length += cur_part->header.length;
- part->header.checksum = nvram_checksum(&part->header);
- part->index = cur_part->index;
-
- list_del(&cur_part->partition);
- kfree(cur_part);
- j = &part->partition; /* fixup our loop */
- }
-
- /* Merge contiguous free partitions forwards */
- list_for_each(j, &part->partition) {
- cur_part = list_entry(j, struct nvram_partition, partition);
- if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
- break;
- }
-
- part->header.length += cur_part->header.length;
- part->header.checksum = nvram_checksum(&part->header);
-
- list_del(&cur_part->partition);
- kfree(cur_part);
- j = &part->partition; /* fixup our loop */
- }
-
rc = nvram_write_header(part);
if (rc <= 0) {
- printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
+ printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
}
+ }
+ /* Merge contiguous ones */
+ prev = NULL;
+ list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
+ if (part->header.signature != NVRAM_SIG_FREE) {
+ prev = NULL;
+ continue;
+ }
+ if (prev) {
+ prev->header.length += part->header.length;
+ prev->header.checksum = nvram_checksum(&prev->header);
+ rc = nvram_write_header(prev);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+ return rc;
+ }
+ list_del(&part->partition);
+ kfree(part);
+ } else
+ prev = part;
}
return 0;
}
-/* nvram_create_os_partition
+/**
+ * nvram_create_partition - Create a partition in nvram
+ * @name: name of the partition to create
+ * @sig: signature of the partition to create
+ * @req_size: size of data to allocate in bytes
+ * @min_size: minimum acceptable size (0 means req_size)
*
- * Create a OS linux partition to buffer error logs.
- * Will create a partition starting at the first free
- * space found if space has enough room.
+ * Returns a negative error code or a positive nvram index
+ * of the beginning of the data area of the newly created
+ * partition. If you provided a min_size smaller than req_size
+ * you need to query for the actual size yourself after the
+ * call using nvram_get_partition_size().
*/
-static int __init nvram_create_os_partition(void)
+loff_t __init nvram_create_partition(const char *name, int sig,
+ int req_size, int min_size)
{
struct nvram_partition *part;
struct nvram_partition *new_part;
struct nvram_partition *free_part = NULL;
- int seq_init[2] = { 0, 0 };
+ static char nv_init_vals[16];
loff_t tmp_index;
long size = 0;
int rc;
-
+
+ /* Convert sizes from bytes to blocks */
+ req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+ min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+
+ /* If no minimum size specified, make it the same as the
+ * requested size
+ */
+ if (min_size == 0)
+ min_size = req_size;
+ if (min_size > req_size)
+ return -EINVAL;
+
+ /* Now add one block to each for the header */
+ req_size += 1;
+ min_size += 1;
+
/* Find a free partition that will give us the maximum needed size
If can't find one that will give us the minimum size needed */
- list_for_each_entry(part, &nvram_part->partition, partition) {
+ list_for_each_entry(part, &nvram_partitions, partition) {
if (part->header.signature != NVRAM_SIG_FREE)
continue;
- if (part->header.length >= NVRAM_MAX_REQ) {
- size = NVRAM_MAX_REQ;
+ if (part->header.length >= req_size) {
+ size = req_size;
free_part = part;
break;
}
- if (!size && part->header.length >= NVRAM_MIN_REQ) {
- size = NVRAM_MIN_REQ;
+ if (part->header.length > size &&
+ part->header.length >= min_size) {
+ size = part->header.length;
free_part = part;
}
}
@@ -326,136 +353,95 @@ static int __init nvram_create_os_partition(void)
/* Create our OS partition */
new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
if (!new_part) {
- printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
+ pr_err("nvram_create_os_partition: kmalloc failed\n");
return -ENOMEM;
}
new_part->index = free_part->index;
- new_part->header.signature = NVRAM_SIG_OS;
+ new_part->header.signature = sig;
new_part->header.length = size;
- strcpy(new_part->header.name, "ppc64,linux");
+ strncpy(new_part->header.name, name, 12);
new_part->header.checksum = nvram_checksum(&new_part->header);
rc = nvram_write_header(new_part);
if (rc <= 0) {
- printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
- "failed (%d)\n", rc);
- return rc;
- }
-
- /* make sure and initialize to zero the sequence number and the error
- type logged */
- tmp_index = new_part->index + NVRAM_HEADER_LEN;
- rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_create_os_partition: nvram_write "
+ pr_err("nvram_create_os_partition: nvram_write_header "
"failed (%d)\n", rc);
return rc;
}
-
- nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
- nvram_error_log_size = ((part->header.length - 1) *
- NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-
list_add_tail(&new_part->partition, &free_part->partition);
- if (free_part->header.length <= size) {
+ /* Adjust or remove the partition we stole the space from */
+ if (free_part->header.length > size) {
+ free_part->index += size * NVRAM_BLOCK_LEN;
+ free_part->header.length -= size;
+ free_part->header.checksum = nvram_checksum(&free_part->header);
+ rc = nvram_write_header(free_part);
+ if (rc <= 0) {
+ pr_err("nvram_create_os_partition: nvram_write_header "
+ "failed (%d)\n", rc);
+ return rc;
+ }
+ } else {
list_del(&free_part->partition);
kfree(free_part);
- return 0;
}
- /* Adjust the partition we stole the space from */
- free_part->index += size * NVRAM_BLOCK_LEN;
- free_part->header.length -= size;
- free_part->header.checksum = nvram_checksum(&free_part->header);
-
- rc = nvram_write_header(free_part);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
- "failed (%d)\n", rc);
- return rc;
+ /* Clear the new partition */
+ for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
+ tmp_index < ((size - 1) * NVRAM_BLOCK_LEN);
+ tmp_index += NVRAM_BLOCK_LEN) {
+ rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
+ if (rc <= 0) {
+ pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+ return rc;
+ }
}
-
- return 0;
+
+ return new_part->index + NVRAM_HEADER_LEN;
}
-
-/* nvram_setup_partition
- *
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
- *
- * The general strategy is the following:
- * 1.) If there is ppc64,linux partition large enough then use it.
- * 2.) If there is not a ppc64,linux partition large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
+/**
+ * nvram_get_partition_size - Get the data size of an nvram partition
+ * @data_index: This is the offset of the start of the data of
+ * the partition. The same value that is returned by
+ * nvram_create_partition().
*/
-static int __init nvram_setup_partition(void)
+int nvram_get_partition_size(loff_t data_index)
{
- struct list_head * p;
- struct nvram_partition * part;
- int rc;
-
- /* For now, we don't do any of this on pmac, until I
- * have figured out if it's worth killing some unused stuffs
- * in our nvram, as Apple defined partitions use pretty much
- * all of the space
- */
- if (machine_is(powermac))
- return -ENOSPC;
-
- /* see if we have an OS partition that meets our needs.
- will try getting the max we need. If not we'll delete
- partitions and try again. */
- list_for_each(p, &nvram_part->partition) {
- part = list_entry(p, struct nvram_partition, partition);
- if (part->header.signature != NVRAM_SIG_OS)
- continue;
+ struct nvram_partition *part;
+
+ list_for_each_entry(part, &nvram_partitions, partition) {
+ if (part->index + NVRAM_HEADER_LEN == data_index)
+ return (part->header.length - 1) * NVRAM_BLOCK_LEN;
+ }
+ return -1;
+}
- if (strcmp(part->header.name, "ppc64,linux"))
- continue;
- if (part->header.length >= NVRAM_MIN_REQ) {
- /* found our partition */
- nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
- nvram_error_log_size = ((part->header.length - 1) *
- NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
- return 0;
+/**
+ * nvram_find_partition - Find an nvram partition by signature and name
+ * @name: Name of the partition or NULL for any name
+ * @sig: Signature to test against
+ * @out_size: if non-NULL, returns the size of the data part of the partition
+ */
+loff_t nvram_find_partition(const char *name, int sig, int *out_size)
+{
+ struct nvram_partition *p;
+
+ list_for_each_entry(p, &nvram_partitions, partition) {
+ if (p->header.signature == sig &&
+ (!name || !strncmp(p->header.name, name, 12))) {
+ if (out_size)
+ *out_size = (p->header.length - 1) *
+ NVRAM_BLOCK_LEN;
+ return p->index + NVRAM_HEADER_LEN;
}
}
-
- /* try creating a partition with the free space we have */
- rc = nvram_create_os_partition();
- if (!rc) {
- return 0;
- }
-
- /* need to free up some space */
- rc = nvram_remove_os_partition();
- if (rc) {
- return rc;
- }
-
- /* create a partition in this new space */
- rc = nvram_create_os_partition();
- if (rc) {
- printk(KERN_ERR "nvram_create_os_partition: Could not find a "
- "NVRAM partition large enough\n");
- return rc;
- }
-
return 0;
}
-
-static int __init nvram_scan_partitions(void)
+int __init nvram_scan_partitions(void)
{
loff_t cur_index = 0;
struct nvram_header phead;
@@ -465,7 +451,7 @@ static int __init nvram_scan_partitions(void)
int total_size;
int err;
- if (ppc_md.nvram_size == NULL)
+ if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
return -ENODEV;
total_size = ppc_md.nvram_size();
@@ -512,12 +498,16 @@ static int __init nvram_scan_partitions(void)
memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
tmp_part->index = cur_index;
- list_add_tail(&tmp_part->partition, &nvram_part->partition);
+ list_add_tail(&tmp_part->partition, &nvram_partitions);
cur_index += phead.length * NVRAM_BLOCK_LEN;
}
err = 0;
+#ifdef DEBUG_NVRAM
+ nvram_print_partitions("NVRAM Partitions");
+#endif
+
out:
kfree(header);
return err;
@@ -525,9 +515,10 @@ static int __init nvram_scan_partitions(void)
static int __init nvram_init(void)
{
- int error;
int rc;
+ BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
return -ENODEV;
@@ -537,29 +528,6 @@ static int __init nvram_init(void)
return rc;
}
- /* initialize our anchor for the nvram partition list */
- nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
- if (!nvram_part) {
- printk(KERN_ERR "nvram_init: Failed kmalloc\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&nvram_part->partition);
-
- /* Get all the NVRAM partitions */
- error = nvram_scan_partitions();
- if (error) {
- printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
- return error;
- }
-
- if(nvram_setup_partition())
- printk(KERN_WARNING "nvram_init: Could not find nvram partition"
- " for nvram buffered error logging.\n");
-
-#ifdef DEBUG_NVRAM
- nvram_print_partitions("NVRAM Partitions");
-#endif
-
return rc;
}
@@ -568,135 +536,6 @@ void __exit nvram_cleanup(void)
misc_deregister( &nvram_dev );
}
-
-#ifdef CONFIG_PPC_PSERIES
-
-/* nvram_write_error_log
- *
- * We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode. If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk. For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name | data |
- * |0 |1 |2 3|4 15|16 length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log |
- * |0 3|4 7|8 nvram_error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
- */
-int nvram_write_error_log(char * buff, int length,
- unsigned int err_type, unsigned int error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (nvram_error_log_index == -1) {
- return -ESPIPE;
- }
-
- if (length > nvram_error_log_size) {
- length = nvram_error_log_size;
- }
-
- info.error_type = err_type;
- info.seq_num = error_log_cnt;
-
- tmp_index = nvram_error_log_index;
-
- rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
- return rc;
- }
-
- rc = ppc_md.nvram_write(buff, length, &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-/* nvram_read_error_log
- *
- * Reads nvram for error log for at most 'length'
- */
-int nvram_read_error_log(char * buff, int length,
- unsigned int * err_type, unsigned int * error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (nvram_error_log_index == -1)
- return -1;
-
- if (length > nvram_error_log_size)
- length = nvram_error_log_size;
-
- tmp_index = nvram_error_log_index;
-
- rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
- return rc;
- }
-
- rc = ppc_md.nvram_read(buff, length, &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
- return rc;
- }
-
- *error_log_cnt = info.seq_num;
- *err_type = info.error_type;
-
- return 0;
-}
-
-/* This doesn't actually zero anything, but it sets the event_logged
- * word to tell that this event is safely in syslog.
- */
-int nvram_clear_error_log(void)
-{
- loff_t tmp_index;
- int clear_word = ERR_FLAG_ALREADY_LOGGED;
- int rc;
-
- if (nvram_error_log_index == -1)
- return -1;
-
- tmp_index = nvram_error_log_index;
-
- rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
- if (rc <= 0) {
- printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
module_init(nvram_init);
module_exit(nvram_cleanup);
MODULE_LICENSE("GPL");
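
Editor's note: the rewritten removal path works in two phases: mark every matching partition free, then make one linear pass folding each run of adjacent NVRAM_SIG_FREE entries into its first member. The merge phase modeled on a plain array rather than the kernel's linked list (illustrative only; headers and checksums omitted):

#include <stddef.h>

struct part { int free; int length; };

/* One pass: fold each run of free partitions into its first element;
 * returns the new partition count. */
static size_t merge_free(struct part *p, size_t n)
{
        size_t out = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (p[i].free && out > 0 && p[out - 1].free)
                        p[out - 1].length += p[i].length;
                else
                        p[out++] = p[i];
        }
        return out;
}
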
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d43fc65749c..851577608a7 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -193,8 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
- pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n",
- hose->io_resource.start, hose->io_resource.end);
+ pr_debug(" hose->io_resource=%pR\n", &hose->io_resource);
return 0;
}
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63..ef3ef566235 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -186,3 +186,10 @@ EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 5113bd2285e..e83ba3f078e 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
/*
* Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a9b32967cff..90653699829 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1316,6 +1316,10 @@ static int set_dac_range(struct task_struct *child,
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dabr;
+#endif
+
if (bp_info->version != 1)
return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1353,11 +1357,10 @@ static long ppc_set_hwdebug(struct task_struct *child,
/*
* We only support one data breakpoint
*/
- if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
- ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
- (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
- (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
- (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+ (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+ bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
+ bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if (child->thread.dabr)
@@ -1366,7 +1369,14 @@ static long ppc_set_hwdebug(struct task_struct *child,
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
- child->thread.dabr = (unsigned long)bp_info->addr;
+ dabr = (unsigned long)bp_info->addr & ~7UL;
+ dabr |= DABR_TRANSLATION;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ dabr |= DABR_DATA_READ;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ dabr |= DABR_DATA_WRITE;
+
+ child->thread.dabr = dabr;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
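
Editor's note: ppc_set_hwdebug now stores a fully formed DABR value instead of a bare address: the address is doubleword-aligned and its low three bits are repurposed as control flags. A sketch of the packing (the bit values are assumed to match the kernel's DABR_* definitions, translation at bit 2, write at bit 1, read at bit 0):

/* Assumed control bits living in the low 3 bits of the DABR. */
#define DABR_TRANSLATION 0x4UL
#define DABR_DATA_WRITE  0x2UL
#define DABR_DATA_READ   0x1UL

static unsigned long pack_dabr(unsigned long addr, int read, int write)
{
        unsigned long dabr = addr & ~7UL;       /* doubleword-align */

        dabr |= DABR_TRANSLATION;               /* match virtual addresses */
        if (read)
                dabr |= DABR_DATA_READ;
        if (write)
                dabr |= DABR_DATA_WRITE;
        return dabr;
}
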
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8a6daf4129f..69c4be917d0 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -280,7 +280,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
break;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ ret = put_user(child->thread.dac1, (u32 __user *)data);
+#else
ret = put_user(child->thread.dabr, (u32 __user *)data);
+#endif
break;
}
@@ -312,6 +316,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SET_DEBUGREG:
case PTRACE_SYSCALL:
case PTRACE_CONT:
+ case PPC_PTRACE_GETHWDBGINFO:
+ case PPC_PTRACE_SETHWDEBUG:
+ case PPC_PTRACE_DELHWDEBUG:
ret = arch_ptrace(child, request, addr, data);
break;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8fe8bc61c10..2097f2b3cba 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
+#include <asm/topology.h>
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
int cpu;
slb_set_size(SLB_MIN_SIZE);
+ stop_topology_update();
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
rc = atomic_read(&data->error);
atomic_set(&data->error, rc);
+ start_topology_update();
if (wake_when_done) {
atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ce6f61c6f87..5a0401fcaeb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -437,8 +437,8 @@ static void __init irqstack_early_init(void)
unsigned int i;
/*
- * interrupt stacks must be under 256MB, we cannot afford to take
- * SLB misses on them.
+ * Interrupt stacks must be in the first segment since we
+ * cannot afford to take SLB misses on them.
*/
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bbf2e4..98136050917 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@ out:
return id;
}
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+ return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+ return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@ int __devinit start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/* Update sibling maps */
- base = cpu_first_thread_in_core(cpu);
+ base = cpu_first_thread_sibling(cpu);
for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i))
continue;
@@ -600,7 +613,7 @@ int __cpu_disable(void)
return err;
/* Update sibling maps */
- base = cpu_first_thread_in_core(cpu);
+ base = cpu_first_thread_sibling(cpu);
for (i = 0; i < threads_per_core; i++) {
cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 010406958d9..09e4dea4a85 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(rtc_lock);
static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
-static unsigned long boot_tb __read_mostly;
+static u64 boot_tb __read_mostly;
extern struct timezone sys_tz;
static long timezone_offset;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index fe460482fa6..9de6f396cf8 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -5,6 +5,7 @@
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/ptrace.h>
/*
* load_up_altivec(unused, unused, tsk)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 441d2a722f0..1b695fdc362 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -600,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
vio_cmo_dealloc(viodev, alloc_size);
}
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+ return dma_iommu_ops.dma_supported(dev, mask);
+}
+
struct dma_map_ops vio_dma_mapping_ops = {
.alloc_coherent = vio_dma_iommu_alloc_coherent,
.free_coherent = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
+ .dma_supported = vio_dma_iommu_dma_supported,
};
@@ -858,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
- viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+ set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
@@ -1244,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
else
- viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+ set_dma_ops(&viodev->dev, &dma_iommu_ops);
set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
@@ -1252,6 +1257,10 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
+ /* needed to ensure proper operation of coherent allocations
+ * later, in case the driver doesn't set it explicitly */
+ dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
/* register with generic device framework */
if (device_register(&viodev->dev)) {
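
Editor's note: because dma_iommu_alloc_coherent() now honours dev->coherent_dma_mask (see the dma-iommu.c hunk earlier), the vio core defaults both masks to 64 bits at registration so coherent allocations keep working even if a driver never sets a mask itself. The driver-side pattern this protects, sketched (function and variable names are illustrative):

#include <linux/dma-mapping.h>

/* Sketch of a vio driver allocation path. */
static void *alloc_ring(struct device *dev, size_t size,
                        dma_addr_t *handle)
{
        /* Safe even if the driver skipped dma_set_coherent_mask():
         * the vio core already defaulted both masks to 64 bits. */
        return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}
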
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 889f2bc106d..166a6a0ad54 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
- checksum_wrappers_64.o
+ checksum_wrappers_64.o hweight_64.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
new file mode 100644
index 00000000000..fda27868cf8
--- /dev/null
+++ b/arch/powerpc/lib/hweight_64.S
@@ -0,0 +1,110 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+/* Note: This code relies on -mminimal-toc */
+
+_GLOBAL(__arch_hweight8)
+BEGIN_FTR_SECTION
+ b .__sw_hweight8
+ nop
+ nop
+FTR_SECTION_ELSE
+ PPC_POPCNTB(r3,r3)
+ clrldi r3,r3,64-8
+ blr
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight16)
+BEGIN_FTR_SECTION
+ b .__sw_hweight16
+ nop
+ nop
+ nop
+ nop
+FTR_SECTION_ELSE
+ BEGIN_FTR_SECTION_NESTED(50)
+ PPC_POPCNTB(r3,r3)
+ srdi r4,r3,8
+ add r3,r4,r3
+ clrldi r3,r3,64-8
+ blr
+ FTR_SECTION_ELSE_NESTED(50)
+ clrlwi r3,r3,16
+ PPC_POPCNTW(r3,r3)
+ clrldi r3,r3,64-8
+ blr
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight32)
+BEGIN_FTR_SECTION
+ b .__sw_hweight32
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+FTR_SECTION_ELSE
+ BEGIN_FTR_SECTION_NESTED(51)
+ PPC_POPCNTB(r3,r3)
+ srdi r4,r3,16
+ add r3,r4,r3
+ srdi r4,r3,8
+ add r3,r4,r3
+ clrldi r3,r3,64-8
+ blr
+ FTR_SECTION_ELSE_NESTED(51)
+ PPC_POPCNTW(r3,r3)
+ clrldi r3,r3,64-8
+ blr
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight64)
+BEGIN_FTR_SECTION
+ b .__sw_hweight64
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+FTR_SECTION_ELSE
+ BEGIN_FTR_SECTION_NESTED(52)
+ PPC_POPCNTB(r3,r3)
+ srdi r4,r3,32
+ add r3,r4,r3
+ srdi r4,r3,16
+ add r3,r4,r3
+ srdi r4,r3,8
+ add r3,r4,r3
+ clrldi r3,r3,64-8
+ blr
+ FTR_SECTION_ELSE_NESTED(52)
+ PPC_POPCNTD(r3,r3)
+ clrldi r3,r3,64-8
+ blr
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
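
Editor's note: on CPUs with only popcntb (a per-byte population count), the byte sums are reduced with the shift-and-add ladder seen in the srdi/add pairs above, then masked to the low byte by the final clrldi. A C model of the 64-bit path, with popcntb itself emulated in software (this mirrors the asm for illustration, it does not replace it):

#include <stdint.h>

/* Software model of popcntb: popcount of each byte, kept per byte. */
static uint64_t popcntb(uint64_t x)
{
        x = x - ((x >> 1) & 0x5555555555555555ULL);
        x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
        return (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
}

static unsigned int hweight64_model(uint64_t w)
{
        uint64_t r = popcntb(w);

        r += r >> 32;           /* the srdi/add pairs in the asm */
        r += r >> 16;
        r += r >> 8;
        return r & 0xff;        /* clrldi r3,r3,64-8 */
}
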
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5e9584405c4..a5991facddc 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1070,7 +1070,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)
{
unsigned long vsid;
- void *pgdir;
+ pgd_t *pgdir;
pte_t *ptep;
unsigned long flags;
int rc, ssize, local = 0;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce99848d91..c0aab52da3a 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id)
* a core map instead but this will do for now.
*/
for_each_cpu(cpu, mm_cpumask(mm)) {
- for (i = cpu_first_thread_in_core(cpu);
- i <= cpu_last_thread_in_core(cpu); i++)
+ for (i = cpu_first_thread_sibling(cpu);
+ i <= cpu_last_thread_sibling(cpu); i++)
__set_bit(id, stale_map[i]);
cpu = i - 1;
}
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
*/
if (test_bit(id, stale_map[cpu])) {
pr_hardcont(" | stale flush %d [%d..%d]",
- id, cpu_first_thread_in_core(cpu),
- cpu_last_thread_in_core(cpu));
+ id, cpu_first_thread_sibling(cpu),
+ cpu_last_thread_sibling(cpu));
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
- for (i = cpu_first_thread_in_core(cpu);
- i <= cpu_last_thread_in_core(cpu); i++) {
+ for (i = cpu_first_thread_sibling(cpu);
+ i <= cpu_last_thread_sibling(cpu); i++) {
__clear_bit(id, stale_map[i]);
}
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 74505b24537..bf5cb91f07d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,10 +20,15 @@
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
+#include <linux/cpuset.h>
+#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
static int numa_enabled = 1;
@@ -163,7 +168,7 @@ static void __init get_node_active_region(unsigned long start_pfn,
work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}
-static void __cpuinit map_cpu_to_node(int cpu, int node)
+static void map_cpu_to_node(int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
@@ -173,7 +178,7 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
int node = numa_cpu_lookup_table[cpu];
@@ -187,7 +192,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
cpu, node);
}
}
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
@@ -246,32 +251,41 @@ static void initialize_distance_lookup_table(int nid,
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
* info is found.
*/
-static int of_node_to_nid_single(struct device_node *device)
+static int associativity_to_nid(const unsigned int *associativity)
{
int nid = -1;
- const unsigned int *tmp;
if (min_common_depth == -1)
goto out;
- tmp = of_get_associativity(device);
- if (!tmp)
- goto out;
-
- if (tmp[0] >= min_common_depth)
- nid = tmp[min_common_depth];
+ if (associativity[0] >= min_common_depth)
+ nid = associativity[min_common_depth];
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = -1;
- if (nid > 0 && tmp[0] >= distance_ref_points_depth)
- initialize_distance_lookup_table(nid, tmp);
+ if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+ initialize_distance_lookup_table(nid, associativity);
out:
return nid;
}
+/* Returns the nid associated with the given device tree node,
+ * or -1 if not found.
+ */
+static int of_node_to_nid_single(struct device_node *device)
+{
+ int nid = -1;
+ const unsigned int *tmp;
+
+ tmp = of_get_associativity(device);
+ if (tmp)
+ nid = associativity_to_nid(tmp);
+ return nid;
+}
+
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
@@ -1247,4 +1261,275 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid;
}
+static u64 hot_add_drconf_memory_max(void)
+{
+ struct device_node *memory = NULL;
+ unsigned int drconf_cell_cnt = 0;
+ u64 lmb_size = 0;
+ const u32 *dm = 0;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+ lmb_size = of_get_lmb_size(memory);
+ of_node_put(memory);
+ }
+ return lmb_size * drconf_cell_cnt;
+}
+
+/*
+ * memory_hotplug_max - return max address of memory that may be added
+ *
+ * This is currently only used on systems that support drconfig memory
+ * hotplug.
+ */
+u64 memory_hotplug_max(void)
+{
+ return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
+}
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/* Virtual Processor Home Node (VPHN) support */
+#ifdef CONFIG_PPC_SPLPAR
+#define VPHN_NR_CHANGE_CTRS (8)
+static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static cpumask_t cpu_associativity_changes_mask;
+static int vphn_enabled;
+static void set_topology_timer(void);
+
+/*
+ * Snapshot the hypervisor's current associativity change counters into
+ * the local per-cpu copies.
+ */
+static void setup_cpu_associativity_change_counters(void)
+{
+ int cpu = 0;
+
+ for_each_possible_cpu(cpu) {
+ int i = 0;
+ u8 *counts = vphn_cpu_change_counts[cpu];
+ volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+ for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+ counts[i] = hypervisor_counts[i];
+ }
+ }
+}
+
+/*
+ * The hypervisor maintains a set of 8 associativity change counters in
+ * the VPA of each cpu that correspond to the associativity levels in the
+ * ibm,associativity-reference-points property. When an associativity
+ * level changes, the corresponding counter is incremented.
+ *
+ * Set a bit in cpu_associativity_changes_mask for each cpu whose home
+ * node associativity levels have changed.
+ *
+ * Returns the number of cpus with unhandled associativity changes.
+ */
+static int update_cpu_associativity_changes_mask(void)
+{
+ int cpu = 0, nr_cpus = 0;
+ cpumask_t *changes = &cpu_associativity_changes_mask;
+
+ cpumask_clear(changes);
+
+ for_each_possible_cpu(cpu) {
+ int i, changed = 0;
+ u8 *counts = vphn_cpu_change_counts[cpu];
+ volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+ for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+ if (hypervisor_counts[i] > counts[i]) {
+ counts[i] = hypervisor_counts[i];
+ changed = 1;
+ }
+ }
+ if (changed) {
+ cpumask_set_cpu(cpu, changes);
+ nr_cpus++;
+ }
+ }
+
+ return nr_cpus;
+}
+
+/* 6 64-bit registers unpacked into 12 32-bit associativity values */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+
+/*
+ * Convert the associativity domain numbers returned from the hypervisor
+ * to the sequence they would appear in the ibm,associativity property.
+ */
+static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+{
+ int i = 0;
+ int nr_assoc_doms = 0;
+ const u16 *field = (const u16*) packed;
+
+#define VPHN_FIELD_UNUSED (0xffff)
+#define VPHN_FIELD_MSB (0x8000)
+#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
+
+ for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+ if (*field == VPHN_FIELD_UNUSED) {
+ /* All significant fields processed, and remaining
+ * fields contain the reserved value of all 1's.
+ * Just store them.
+ */
+ unpacked[i] = *((u32*)field);
+ field += 2;
+ }
+ else if (*field & VPHN_FIELD_MSB) {
+ /* Data is in the lower 15 bits of this field */
+ unpacked[i] = *field & VPHN_FIELD_MASK;
+ field++;
+ nr_assoc_doms++;
+ }
+ else {
+ /* Data is in the lower 15 bits of this field
+ * concatenated with the next 16 bit field
+ */
+ unpacked[i] = *((u32*)field);
+ field += 2;
+ nr_assoc_doms++;
+ }
+ }
+
+ return nr_assoc_doms;
+}
+
+/*
+ * Retrieve the new associativity information for a virtual processor's
+ * home node.
+ */
+static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+{
+ long rc = 0;
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ u64 flags = 1;
+ int hwcpu = get_hard_smp_processor_id(cpu);
+
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+ vphn_unpack_associativity(retbuf, associativity);
+
+ return rc;
+}
+
+static long vphn_get_associativity(unsigned long cpu,
+ unsigned int *associativity)
+{
+ long rc = 0;
+
+ rc = hcall_vphn(cpu, associativity);
+
+ switch (rc) {
+ case H_FUNCTION:
+ printk(KERN_INFO
+ "VPHN is not supported. Disabling polling...\n");
+ stop_topology_update();
+ break;
+ case H_HARDWARE:
+ printk(KERN_ERR
+ "hcall_vphn() experienced a hardware fault "
+ "preventing VPHN. Disabling polling...\n");
+ stop_topology_update();
+ }
+
+ return rc;
+}
+
+/*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed.
+ */
+int arch_update_cpu_topology(void)
+{
+ int cpu = 0, nid = 0, old_nid = 0;
+ unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ struct sys_device *sysdev = NULL;
+
+ for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+ vphn_get_associativity(cpu, associativity);
+ nid = associativity_to_nid(associativity);
+
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ old_nid = numa_cpu_lookup_table[cpu];
+
+ /* Disable hotplug while we update the cpu
+ * masks and sysfs.
+ */
+ get_online_cpus();
+ unregister_cpu_under_node(cpu, old_nid);
+ unmap_cpu_from_node(cpu);
+ map_cpu_to_node(cpu, nid);
+ register_cpu_under_node(cpu, nid);
+ put_online_cpus();
+
+ sysdev = get_cpu_sysdev(cpu);
+ if (sysdev)
+ kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ }
+
+ return 1;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+ rebuild_sched_domains();
+}
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+void topology_schedule_update(void)
+{
+ schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+ if (!vphn_enabled)
+ return;
+ if (update_cpu_associativity_changes_mask() > 0)
+ topology_schedule_update();
+ set_topology_timer();
+}
+static struct timer_list topology_timer =
+ TIMER_INITIALIZER(topology_timer_fn, 0, 0);
+
+static void set_topology_timer(void)
+{
+ topology_timer.data = 0;
+ topology_timer.expires = jiffies + 60 * HZ;
+ add_timer(&topology_timer);
+}
+
+/*
+ * Start polling for VPHN associativity changes.
+ */
+int start_topology_update(void)
+{
+ int rc = 0;
+
+ if (firmware_has_feature(FW_FEATURE_VPHN)) {
+ vphn_enabled = 1;
+ setup_cpu_associativity_change_counters();
+ init_timer_deferrable(&topology_timer);
+ set_topology_timer();
+ rc = 1;
+ }
+
+ return rc;
+}
+__initcall(start_topology_update);
+
+/*
+ * Disable polling for VPHN associativity changes.
+ */
+int stop_topology_update(void)
+{
+ vphn_enabled = 0;
+ return del_timer_sync(&topology_timer);
+}
+#endif /* CONFIG_PPC_SPLPAR */
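The VPHN decoding above is compact enough to exercise standalone. Below is a minimal userspace sketch of the same unpacking rules, not kernel code: a 0xffff field is reserved and stored raw, a field with the MSB set carries a 15-bit domain number by itself, and a field with the MSB clear is concatenated with the following 16-bit field into one 32-bit value. The sample buffer contents are made-up values for illustration, and memcpy stands in for the kernel's u32-through-u16-pointer reads.

/* Userspace sketch of the VPHN field unpacking described above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VPHN_ASSOC_BUFSIZE (6 * sizeof(uint64_t) / sizeof(uint32_t))
#define VPHN_FIELD_UNUSED  0xffff
#define VPHN_FIELD_MSB     0x8000
#define VPHN_FIELD_MASK    0x7fff

static int vphn_unpack(const uint16_t *field, uint32_t *unpacked)
{
	int i, nr_assoc_doms = 0;

	for (i = 0; i < (int)VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* reserved: store the two raw 16-bit fields */
			memcpy(&unpacked[i], field, sizeof(uint32_t));
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* 15-bit domain number in this field alone */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* 32-bit value spanning this field and the next */
			memcpy(&unpacked[i], field, sizeof(uint32_t));
			field += 2;
			nr_assoc_doms++;
		}
	}
	return nr_assoc_doms;
}

int main(void)
{
	uint16_t packed[24];                  /* 6 u64s worth of u16 fields */
	uint32_t out[VPHN_ASSOC_BUFSIZE];
	int n;

	memset(packed, 0xff, sizeof(packed)); /* all fields reserved */
	packed[0] = 0x8002;                   /* 15-bit domain: 2 */
	packed[1] = 0x8005;                   /* 15-bit domain: 5 */

	n = vphn_unpack(packed, out);
	printf("%d domains, first two: %u %u\n", n, out[0], out[1]);
	return 0;
}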
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a87ead0138b..8dc41c0157f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -78,7 +78,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
- ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+ ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
@@ -230,6 +230,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
return NULL;
+ area->phys_addr = p;
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 21d6dfab794..88927a05cdc 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -223,6 +223,8 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
caller);
if (area == NULL)
return NULL;
+
+ area->phys_addr = paligned;
ret = __ioremap_at(paligned, area->addr, size, flags);
if (!ret)
vunmap(area->addr);
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 82ff326e079..c04d16df848 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,4 +1,7 @@
-obj-$(CONFIG_44x) := misc_44x.o idle.o
+obj-$(CONFIG_44x) += misc_44x.o
+ifneq ($(CONFIG_PPC4xx_CPM),y)
+obj-$(CONFIG_44x) += idle.o
+endif
obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 956154f32cf..20576829eca 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -313,13 +313,14 @@ config OF_RTC
source "arch/powerpc/sysdev/bestcomm/Kconfig"
config MPC8xxx_GPIO
- bool "MPC8xxx GPIO support"
- depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
+ bool "MPC512x/MPC8xxx GPIO support"
+ depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
+ FSL_SOC_BOOKE || PPC_86xx
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here if you're going to use hardware that connects to the
- MPC831x/834x/837x/8572/8610 GPIOs.
+ MPC512x/831x/834x/837x/8572/8610 GPIOs.
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index beec405eb6f..3ce68556893 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -76,7 +76,7 @@ static void __init celleb_init_direct_mapping(void)
static void celleb_dma_dev_setup(struct device *dev)
{
- dev->archdata.dma_ops = get_pci_dma_ops();
+ set_dma_ops(dev, &dma_direct_ops);
set_dma_offset(dev, celleb_dma_direct_offset);
}
@@ -106,7 +106,6 @@ static struct notifier_block celleb_of_bus_notifier = {
static int __init celleb_init_iommu(void)
{
celleb_init_direct_mapping();
- set_pci_dma_ops(&dma_direct_ops);
ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index a101abf1750..3b894f58528 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -36,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa)
struct spu_lscsa *lscsa;
unsigned char *p;
- lscsa = vmalloc(sizeof(struct spu_lscsa));
+ lscsa = vzalloc(sizeof(struct spu_lscsa));
if (!lscsa)
return -ENOMEM;
- memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
/* Set LS pages reserved to allow for user-space mapping. */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 054dfe5b8e7..f803f4b8ab6 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -29,6 +29,10 @@
extern spinlock_t rtc_lock;
+#define NVRAM_AS0 0x74
+#define NVRAM_AS1 0x75
+#define NVRAM_DATA 0x77
+
static int nvram_as1 = NVRAM_AS1;
static int nvram_as0 = NVRAM_AS0;
static int nvram_data = NVRAM_DATA;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 42d0a886de0..b5e026bdca2 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1045,71 +1045,9 @@ static const struct file_operations mf_side_proc_fops = {
.write = mf_side_proc_write,
};
-#if 0
-static void mf_getSrcHistory(char *buffer, int size)
-{
- struct IplTypeReturnStuff return_stuff;
- struct pending_event *ev = new_pending_event();
- int rc = 0;
- char *pages[4];
-
- pages[0] = kmalloc(4096, GFP_ATOMIC);
- pages[1] = kmalloc(4096, GFP_ATOMIC);
- pages[2] = kmalloc(4096, GFP_ATOMIC);
- pages[3] = kmalloc(4096, GFP_ATOMIC);
- if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
- || (pages[2] == NULL) || (pages[3] == NULL))
- return -ENOMEM;
-
- return_stuff.xType = 0;
- return_stuff.xRc = 0;
- return_stuff.xDone = 0;
- ev->event.hp_lp_event.xSubtype = 6;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'V', 'I');
- ev->event.data.vsp_cmd.xEvent = &return_stuff;
- ev->event.data.vsp_cmd.cmd = 4;
- ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
- ev->event.data.vsp_cmd.result_code = 0xFF;
- ev->event.data.vsp_cmd.reserved = 0;
- ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
- ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
- ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
- ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
- mb();
- if (signal_event(ev) != 0)
- return;
-
- while (return_stuff.xDone != 1)
- udelay(10);
- if (return_stuff.xRc == 0)
- memcpy(buffer, pages[0], size);
- kfree(pages[0]);
- kfree(pages[1]);
- kfree(pages[2]);
- kfree(pages[3]);
-}
-#endif
-
static int mf_src_proc_show(struct seq_file *m, void *v)
{
-#if 0
- int len;
-
- mf_getSrcHistory(page, count);
- len = count;
- len -= off;
- if (len < count) {
- *eof = 1;
- if (len <= 0)
- return 0;
- } else
- len = count;
- *start = page + off;
- return len;
-#else
return 0;
-#endif
}
static int mf_src_proc_open(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 1f9fb2c5776..14943ef0191 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -156,20 +156,12 @@ static void iommu_table_iobmap_setup(void)
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
- struct device_node *dn;
-
pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
if (!iommu_table_iobmap_inited) {
iommu_table_iobmap_inited = 1;
iommu_table_iobmap_setup();
}
-
- dn = pci_bus_to_OF_node(bus);
-
- if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
-
}
@@ -192,9 +184,6 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
-static void pci_dma_bus_setup_null(struct pci_bus *b) { }
-static void pci_dma_dev_setup_null(struct pci_dev *d) { }
-
int __init iob_init(struct device_node *dn)
{
unsigned long tmp;
@@ -251,14 +240,8 @@ void __init iommu_init_early_pasemi(void)
iommu_off = of_chosen &&
of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
- if (iommu_off) {
- /* Direct I/O, IOMMU off */
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
- set_pci_dma_ops(&dma_direct_ops);
-
+ if (iommu_off)
return;
- }
iob_init(NULL);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9deb274841f..d5aceb7fb12 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -506,6 +506,15 @@ static int __init pmac_declare_of_platform_devices(void)
of_platform_device_create(np, "smu", NULL);
of_node_put(np);
}
+ np = of_find_node_by_type(NULL, "fcu");
+ if (np == NULL) {
+ /* Some machines have strangely broken device-tree */
+ np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
+ }
+ if (np) {
+ of_platform_device_create(np, "temperature", NULL);
+ of_node_put(np);
+ }
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3139814f643..5d1b743dbe7 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -33,6 +33,16 @@ config PSERIES_MSI
depends on PCI_MSI && EEH
default y
+config PSERIES_ENERGY
+ tristate "pSeries energy management capabilities driver"
+ depends on PPC_PSERIES
+ default y
+ help
+	  Provides an interface to platform energy management capabilities
+	  on supported PSERIES platforms.
+	  Provides: /sys/devices/system/cpu/pseries_(de)activate_hint_list
+	  and /sys/devices/system/cpu/cpuN/pseries_(de)activate_hint
+
config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 59eb8bdaa79..fc5237810ec 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
obj-$(CONFIG_PSERIES_MSI) += msi.o
+obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0a14d8cd314..0b0eff0cce3 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -55,6 +55,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_XDABR, "hcall-xdabr"},
{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
{FW_FEATURE_SPLPAR, "hcall-splpar"},
+ {FW_FEATURE_VPHN, "hcall-vphn"},
};
/* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 48d20573e4d..fd05fdee576 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
#define STK_PARM(i) (48 + ((i)-3)*8)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a77bcaed80a..edea60b7ee9 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -140,7 +140,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
-static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
+static DEFINE_PER_CPU(u64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -323,14 +323,13 @@ static void iommu_table_setparms(struct pci_controller *phb,
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
- const void *dma_window,
- int bussubno)
+ const void *dma_window)
{
unsigned long offset, size;
- tbl->it_busno = bussubno;
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
+ tbl->it_busno = phb->bus->number;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
@@ -450,14 +449,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->iommu_table) {
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
ppci->phb->node);
- iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
- bus->number);
+ iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
pr_debug(" created table: %p\n", ppci->iommu_table);
}
-
- if (pdn != dn)
- PCI_DN(dn)->iommu_table = ppci->iommu_table;
}
@@ -533,21 +528,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
}
pr_debug(" parent is %s\n", pdn->full_name);
- /* Check for parent == NULL so we don't try to setup the empty EADS
- * slots on POWER4 machines.
- */
- if (dma_window == NULL || pdn->parent == NULL) {
- pr_debug(" no dma window for device, linking to parent\n");
- set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
- return;
- }
-
pci = PCI_DN(pdn);
if (!pci->iommu_table) {
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
pci->phb->node);
- iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
- pci->phb->bus->number);
+ iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
pr_debug(" created table: %p\n", pci->iommu_table);
} else {
@@ -571,8 +556,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
switch (action) {
case PSERIES_RECONFIG_REMOVE:
- if (pci && pci->iommu_table &&
- of_get_property(np, "ibm,dma-window", NULL))
+ if (pci && pci->iommu_table)
iommu_free_table(pci->iommu_table, np->full_name);
break;
default:
@@ -589,13 +573,8 @@ static struct notifier_block iommu_reconfig_nb = {
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
- if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
- /* Direct I/O, IOMMU off */
- ppc_md.pci_dma_dev_setup = NULL;
- ppc_md.pci_dma_bus_setup = NULL;
- set_pci_dma_ops(&dma_direct_ops);
+ if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
return;
- }
if (firmware_has_feature(FW_FEATURE_LPAR)) {
if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
@@ -622,3 +601,17 @@ void iommu_init_early_pSeries(void)
set_pci_dma_ops(&dma_iommu_ops);
}
+static int __init disable_multitce(char *str)
+{
+ if (strcmp(str, "off") == 0 &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ firmware_has_feature(FW_FEATURE_MULTITCE)) {
+ printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
+ ppc_md.tce_build = tce_build_pSeriesLP;
+ ppc_md.tce_free = tce_free_pSeriesLP;
+ powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+ }
+ return 1;
+}
+
+__setup("multitce=", disable_multitce);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f129040d974..5d3ea9f60dd 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -627,6 +627,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
+static int __init disable_bulk_remove(char *str)
+{
+ if (strcmp(str, "off") == 0 &&
+ firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+ printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
+ powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+ }
+ return 1;
+}
+
+__setup("bulk_remove=", disable_bulk_remove);
+
void __init hpte_init_lpar(void)
{
ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index bc3c7f2abd7..7e828ba29bc 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -22,11 +22,25 @@
#include <asm/prom.h>
#include <asm/machdep.h>
+/* Max bytes to read/write in one go */
+#define NVRW_CNT 0x20
+
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
+static long nvram_error_log_index = -1;
+static long nvram_error_log_size = 0;
+
+struct err_log_info {
+ int error_type;
+ unsigned int seq_num;
+};
+#define NVRAM_MAX_REQ 2079
+#define NVRAM_MIN_REQ 1055
+
+#define NVRAM_LOG_PART_NAME "ibm,rtas-log"
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
@@ -119,6 +133,197 @@ static ssize_t pSeries_nvram_get_size(void)
return nvram_size ? nvram_size : -ENODEV;
}
+
+/* nvram_write_error_log
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode. If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk. For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state in which it would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer looks like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name | data |
+ * |0 |1 |2 3|4 15|16 length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log |
+ * |0 3|4 7|8 nvram_error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_error_log(char * buff, int length,
+ unsigned int err_type, unsigned int error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (nvram_error_log_index == -1) {
+ return -ESPIPE;
+ }
+
+ if (length > nvram_error_log_size) {
+ length = nvram_error_log_size;
+ }
+
+ info.error_type = err_type;
+ info.seq_num = error_log_cnt;
+
+ tmp_index = nvram_error_log_index;
+
+ rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+ return rc;
+ }
+
+ rc = ppc_md.nvram_write(buff, length, &tmp_index);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
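For reference, the layout in the diagram above maps onto two small records; the following is an illustrative C rendering, not something the kernel defines. Field names other than those of struct err_log_info are assumptions, and the kernel accesses these bytes through ppc_md.nvram_read/nvram_write rather than by overlaying structs on NVRAM.

/* Illustrative rendering of the NVRAM error-log partition layout
 * sketched in the comment above; widths follow the diagram.
 */
#include <stdint.h>

struct nvram_part_header {	/* bytes 0..15 of the partition */
	uint8_t  signature;	/* byte 0 */
	uint8_t  checksum;	/* byte 1 */
	uint16_t length;	/* bytes 2..3 */
	char     name[12];	/* bytes 4..15 */
};

struct nvram_err_log_data {	/* the partition's 'data' section */
	uint32_t event_logged;	/* 0 = not yet in syslog, 1 = logged;
				 * written via struct err_log_info */
	uint32_t seq_num;	/* unique until it wraps */
	/* followed by nvram_error_log_size bytes from event_scan */
};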
+
+/* nvram_read_error_log
+ *
+ * Reads the error log from nvram, at most 'length' bytes.
+ */
+int nvram_read_error_log(char * buff, int length,
+ unsigned int * err_type, unsigned int * error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (nvram_error_log_index == -1)
+ return -1;
+
+ if (length > nvram_error_log_size)
+ length = nvram_error_log_size;
+
+ tmp_index = nvram_error_log_index;
+
+ rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+ return rc;
+ }
+
+ rc = ppc_md.nvram_read(buff, length, &tmp_index);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+ return rc;
+ }
+
+ *error_log_cnt = info.seq_num;
+ *err_type = info.error_type;
+
+ return 0;
+}
+
+/* This doesn't actually zero anything, but it sets the event_logged
+ * word to indicate that this event is safely in syslog.
+ */
+int nvram_clear_error_log(void)
+{
+ loff_t tmp_index;
+ int clear_word = ERR_FLAG_ALREADY_LOGGED;
+ int rc;
+
+ if (nvram_error_log_index == -1)
+ return -1;
+
+ tmp_index = nvram_error_log_index;
+
+ rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* pseries_nvram_init_log_partition
+ *
+ * This will set up the partition we need for buffering the
+ * error logs, and clean up partitions if needed.
+ *
+ * The general strategy is the following:
+ * 1.) If there is a log partition large enough then use it.
+ * 2.) If there is none large enough, search
+ *     for a free partition that is large enough.
+ * 3.) If there is no free partition large enough, remove
+ *     _all_ OS partitions and consolidate the space.
+ * 4.) First try getting a chunk that will satisfy the maximum
+ *     error log size (NVRAM_MAX_REQ).
+ * 5.) If the max chunk cannot be allocated, then try finding a chunk
+ *     that will satisfy the minimum needed (NVRAM_MIN_REQ).
+ */
+static int __init pseries_nvram_init_log_partition(void)
+{
+ loff_t p;
+ int size;
+
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+
+	/* Look for ours */
+ p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
+
+ /* Found one but too small, remove it */
+ if (p && size < NVRAM_MIN_REQ) {
+ pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
+			", removing it...");
+ nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
+ p = 0;
+ }
+
+	/* Create one if we didn't find any */
+ if (!p) {
+ p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
+ NVRAM_MAX_REQ, NVRAM_MIN_REQ);
+ /* No room for it, try to get rid of any OS partition
+ * and try again
+ */
+ if (p == -ENOSPC) {
+ pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
+ " partition, deleting all OS partitions...");
+ nvram_remove_partition(NULL, NVRAM_SIG_OS);
+ p = nvram_create_partition(NVRAM_LOG_PART_NAME,
+ NVRAM_SIG_OS, NVRAM_MAX_REQ,
+ NVRAM_MIN_REQ);
+ }
+ }
+
+ if (p <= 0) {
+ pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
+ " partition, err %d\n", (int)p);
+ return 0;
+ }
+
+ nvram_error_log_index = p;
+ nvram_error_log_size = nvram_get_partition_size(p) -
+ sizeof(struct err_log_info);
+
+ return 0;
+}
+machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
+
int __init pSeries_nvram_init(void)
{
struct device_node *nvram;
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
new file mode 100644
index 00000000000..c8b3c69fe89
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -0,0 +1,326 @@
+/*
+ * POWER platform energy management driver
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This pseries platform device driver provides access to
+ * platform energy management capabilities.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "pseries_energy"
+
+/* Driver flags */
+
+static int sysfs_entries;
+
+/* Helper routines */
+
+/*
+ * Routine to detect firmware support for the H_BEST_ENERGY hcall.
+ * Returns 1 if H_BEST_ENERGY is supported,
+ * 0 otherwise.
+ */
+
+static int check_for_h_best_energy(void)
+{
+ struct device_node *rtas = NULL;
+ const char *hypertas, *s;
+ int length;
+ int rc = 0;
+
+ rtas = of_find_node_by_path("/rtas");
+ if (!rtas)
+ return 0;
+
+ hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
+ if (!hypertas) {
+ of_node_put(rtas);
+ return 0;
+ }
+
+	/* hypertas holds a list of NUL-terminated hcall name strings */
+ for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
+ if (!strncmp("hcall-best-energy-1", s, 19)) {
+ rc = 1; /* Found the string */
+ break;
+ }
+ }
+ of_node_put(rtas);
+ return rc;
+}
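The ibm,hypertas-functions property walked above is a standard device-tree string list: consecutive NUL-terminated strings packed into one blob whose total length comes from of_get_property(). A standalone sketch of the same walk, with made-up sample data:

/* Walking a packed NUL-separated string list, the format used by
 * device-tree properties such as "ibm,hypertas-functions".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char blob[] = "hcall-splpar\0hcall-best-energy-1\0hcall-vphn";
	int length = sizeof(blob);	/* includes the trailing NUL */
	const char *s;
	int found = 0;

	for (s = blob; s < blob + length; s += strlen(s) + 1) {
		if (strcmp(s, "hcall-best-energy-1") == 0) {
			found = 1;
			break;
		}
	}
	printf("hcall-best-energy-1 %s\n", found ? "present" : "absent");
	return 0;
}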
+
+/* Helper routines to convert between drc_index and cpu numbers */
+
+static u32 cpu_to_drc_index(int cpu)
+{
+ struct device_node *dn = NULL;
+ const int *indexes;
+ int i;
+ int rc = 1;
+ u32 ret = 0;
+
+ dn = of_find_node_by_path("/cpus");
+ if (dn == NULL)
+ goto err;
+ indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+ if (indexes == NULL)
+ goto err_of_node_put;
+ /* Convert logical cpu number to core number */
+ i = cpu_core_index_of_thread(cpu);
+ /*
+ * The first element indexes[0] is the number of drc_indexes
+ * returned in the list. Hence i+1 will get the drc_index
+ * corresponding to core number i.
+ */
+ WARN_ON(i > indexes[0]);
+ ret = indexes[i + 1];
+ rc = 0;
+
+err_of_node_put:
+ of_node_put(dn);
+err:
+ if (rc)
+ printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
+ return ret;
+}
+
+static int drc_index_to_cpu(u32 drc_index)
+{
+ struct device_node *dn = NULL;
+ const int *indexes;
+ int i, cpu = 0;
+ int rc = 1;
+
+ dn = of_find_node_by_path("/cpus");
+ if (dn == NULL)
+ goto err;
+ indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+ if (indexes == NULL)
+ goto err_of_node_put;
+ /*
+ * First element in the array is the number of drc_indexes
+ * returned. Search through the list to find the matching
+ * drc_index and get the core number
+ */
+ for (i = 0; i < indexes[0]; i++) {
+ if (indexes[i + 1] == drc_index)
+ break;
+ }
+ /* Convert core number to logical cpu number */
+ cpu = cpu_first_thread_of_core(i);
+ rc = 0;
+
+err_of_node_put:
+ of_node_put(dn);
+err:
+ if (rc)
+ printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index);
+ return cpu;
+}
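Both helpers above depend on the layout of the ibm,drc-indexes property: element 0 holds the entry count and elements 1..count hold one DRC index per core. A standalone sketch of the round trip; the sample property values and the fixed threads-per-core count are illustrative assumptions:

/* Sketch of the core <-> drc_index mapping used above. */
#include <stdio.h>

#define THREADS_PER_CORE 4	/* assumption for this sketch */

static unsigned int core_to_drc(const int *indexes, int core)
{
	return indexes[core + 1];	/* entry 0 is the count */
}

static int drc_to_first_cpu(const int *indexes, unsigned int drc)
{
	int i;

	for (i = 0; i < indexes[0]; i++)
		if ((unsigned int)indexes[i + 1] == drc)
			break;
	return i * THREADS_PER_CORE;	/* first thread of that core */
}

int main(void)
{
	const int indexes[] = { 3, 0x1000, 0x1001, 0x1002 };

	printf("core 1 -> drc 0x%x\n", core_to_drc(indexes, 1));
	printf("drc 0x1002 -> cpu %d\n", drc_to_first_cpu(indexes, 0x1002));
	return 0;
}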
+
+/*
+ * The pseries hypervisor call H_BEST_ENERGY provides hints to the OS on
+ * preferred logical cpus to activate or deactivate for optimized
+ * energy consumption.
+ */
+
+#define FLAGS_MODE1 0x004E200000080E01
+#define FLAGS_MODE2 0x004E200000080401
+#define FLAGS_ACTIVATE 0x100
+
+static ssize_t get_best_energy_list(char *page, int activate)
+{
+ int rc, cnt, i, cpu;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long flags = 0;
+ u32 *buf_page;
+ char *s = page;
+
+ buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!buf_page)
+ return -ENOMEM;
+
+ flags = FLAGS_MODE1;
+ if (activate)
+ flags |= FLAGS_ACTIVATE;
+
+ rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
+ 0, 0, 0, 0, 0, 0);
+ if (rc != H_SUCCESS) {
+ free_page((unsigned long) buf_page);
+ return -EINVAL;
+ }
+
+ cnt = retbuf[0];
+ for (i = 0; i < cnt; i++) {
+ cpu = drc_index_to_cpu(buf_page[2*i+1]);
+ if ((cpu_online(cpu) && !activate) ||
+ (!cpu_online(cpu) && activate))
+ s += sprintf(s, "%d,", cpu);
+ }
+ if (s > page) { /* Something to show */
+ s--; /* Suppress last comma */
+ s += sprintf(s, "\n");
+ }
+
+ free_page((unsigned long) buf_page);
+ return s-page;
+}
+
+static ssize_t get_best_energy_data(struct sys_device *dev,
+ char *page, int activate)
+{
+ int rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long flags = 0;
+
+ flags = FLAGS_MODE2;
+ if (activate)
+ flags |= FLAGS_ACTIVATE;
+
+ rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
+ cpu_to_drc_index(dev->id),
+ 0, 0, 0, 0, 0, 0, 0);
+
+ if (rc != H_SUCCESS)
+ return -EINVAL;
+
+ return sprintf(page, "%lu\n", retbuf[1] >> 32);
+}
+
+/* Wrapper functions */
+
+static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *page)
+{
+ return get_best_energy_list(page, 1);
+}
+
+static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *page)
+{
+ return get_best_energy_list(page, 0);
+}
+
+static ssize_t percpu_activate_hint_show(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *page)
+{
+ return get_best_energy_data(dev, page, 1);
+}
+
+static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *page)
+{
+ return get_best_energy_data(dev, page, 0);
+}
+
+/*
+ * Create sysfs interface:
+ * /sys/devices/system/cpu/pseries_activate_hint_list
+ * /sys/devices/system/cpu/pseries_deactivate_hint_list
+ * Comma separated list of cpus to activate or deactivate
+ * /sys/devices/system/cpu/cpuN/pseries_activate_hint
+ * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
+ * Per-cpu value of the hint
+ */
+
+struct sysdev_class_attribute attr_cpu_activate_hint_list =
+ _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+ cpu_activate_hint_list_show, NULL);
+
+struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
+ _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+ cpu_deactivate_hint_list_show, NULL);
+
+struct sysdev_attribute attr_percpu_activate_hint =
+ _SYSDEV_ATTR(pseries_activate_hint, 0444,
+ percpu_activate_hint_show, NULL);
+
+struct sysdev_attribute attr_percpu_deactivate_hint =
+ _SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+ percpu_deactivate_hint_show, NULL);
+
+static int __init pseries_energy_init(void)
+{
+ int cpu, err;
+ struct sys_device *cpu_sys_dev;
+
+ if (!check_for_h_best_energy()) {
+ printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
+ return 0;
+ }
+ /* Create the sysfs files */
+ err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &attr_cpu_activate_hint_list.attr);
+ if (!err)
+ err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &attr_cpu_deactivate_hint_list.attr);
+
+ if (err)
+ return err;
+ for_each_possible_cpu(cpu) {
+ cpu_sys_dev = get_cpu_sysdev(cpu);
+ err = sysfs_create_file(&cpu_sys_dev->kobj,
+ &attr_percpu_activate_hint.attr);
+ if (err)
+ break;
+ err = sysfs_create_file(&cpu_sys_dev->kobj,
+ &attr_percpu_deactivate_hint.attr);
+ if (err)
+ break;
+ }
+
+ if (err)
+ return err;
+
+	sysfs_entries = 1; /* Remove entries on cleanup */
+ return 0;
+
+}
+
+static void __exit pseries_energy_cleanup(void)
+{
+ int cpu;
+ struct sys_device *cpu_sys_dev;
+
+ if (!sysfs_entries)
+ return;
+
+ /* Remove the sysfs files */
+ sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+ &attr_cpu_activate_hint_list.attr);
+
+ sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+ &attr_cpu_deactivate_hint_list.attr);
+
+ for_each_possible_cpu(cpu) {
+ cpu_sys_dev = get_cpu_sysdev(cpu);
+ sysfs_remove_file(&cpu_sys_dev->kobj,
+ &attr_percpu_activate_hint.attr);
+ sysfs_remove_file(&cpu_sys_dev->kobj,
+ &attr_percpu_deactivate_hint.attr);
+ }
+}
+
+module_init(pseries_energy_init);
+module_exit(pseries_energy_cleanup);
+MODULE_DESCRIPTION("Driver for pSeries platform energy management");
+MODULE_AUTHOR("Vaidyanathan Srinivasan");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0bef9dacb64..9c297347914 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_4xx) += ppc4xx_pci.o
endif
+obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
obj-$(CONFIG_CPM) += cpm_common.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 17cf15ec38b..8e9e06a7ca5 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -312,17 +312,10 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)
static void pci_dma_bus_setup_dart(struct pci_bus *bus)
{
- struct device_node *dn;
-
if (!iommu_table_dart_inited) {
iommu_table_dart_inited = 1;
iommu_table_dart_setup();
}
-
- dn = pci_bus_to_OF_node(bus);
-
- if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@ void __init iommu_init_early_dart(void)
if (dn == NULL) {
dn = of_find_compatible_node(NULL, "dart", "u4-dart");
if (dn == NULL)
- goto bail;
+ return; /* use default direct_dma_ops */
dart_is_u4 = 1;
}
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c0ea05e87f1..c48cd817807 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
/*
- * GPIOs on MPC8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
*
* Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
*
@@ -26,6 +26,7 @@
#define GPIO_IER 0x0c
#define GPIO_IMR 0x10
#define GPIO_ICR 0x14
+#define GPIO_ICR2 0x18
struct mpc8xxx_gpio_chip {
struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@ struct mpc8xxx_gpio_chip {
*/
u32 data;
struct irq_host *irq;
+ void *of_dev_id_data;
};
static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -215,6 +217,51 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
return 0;
}
+static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long gpio = virq_to_hw(virq);
+ void __iomem *reg;
+ unsigned int shift;
+ unsigned long flags;
+
+ if (gpio < 16) {
+ reg = mm->regs + GPIO_ICR;
+ shift = (15 - gpio) * 2;
+ } else {
+ reg = mm->regs + GPIO_ICR2;
+ shift = (15 - (gpio % 16)) * 2;
+ }
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrsetbits_be32(reg, 3 << shift, 2 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrsetbits_be32(reg, 3 << shift, 1 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrbits32(reg, 3 << shift);
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
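The register selection above packs two interrupt-sense bits per GPIO, sixteen GPIOs per register, with line 0 (or 16) occupying the most significant bit pair. A small sketch of just that arithmetic, with no MMIO involved:

/* The ICR register/shift math used by mpc512x_irq_set_type. */
#include <stdio.h>

#define GPIO_ICR  0x14
#define GPIO_ICR2 0x18

static void icr_slot(unsigned int gpio, unsigned int *reg,
		     unsigned int *shift)
{
	if (gpio < 16) {
		*reg = GPIO_ICR;
		*shift = (15 - gpio) * 2;
	} else {
		*reg = GPIO_ICR2;
		*shift = (15 - (gpio % 16)) * 2;
	}
}

int main(void)
{
	unsigned int gpio, reg, shift;

	for (gpio = 0; gpio < 32; gpio += 15) {
		icr_slot(gpio, &reg, &shift);
		printf("gpio %2u -> reg 0x%02x, shift %2u\n",
		       gpio, reg, shift);
	}
	return 0;
}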
+
static struct irq_chip mpc8xxx_irq_chip = {
.name = "mpc8xxx-gpio",
.unmask = mpc8xxx_irq_unmask,
@@ -226,6 +273,11 @@ static struct irq_chip mpc8xxx_irq_chip = {
static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+ if (mpc8xxx_gc->of_dev_id_data)
+ mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+
set_irq_chip_data(virq, h->host_data);
set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
set_irq_type(virq, IRQ_TYPE_NONE);
@@ -253,11 +305,20 @@ static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
.xlate = mpc8xxx_gpio_irq_xlate,
};
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+ { .compatible = "fsl,mpc8349-gpio", },
+ { .compatible = "fsl,mpc8572-gpio", },
+ { .compatible = "fsl,mpc8610-gpio", },
+ { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+ {}
+};
+
static void __init mpc8xxx_add_controller(struct device_node *np)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
+ const struct of_device_id *id;
unsigned hwirq;
int ret;
@@ -297,6 +358,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
if (!mpc8xxx_gc->irq)
goto skip_irq;
+ id = of_match_node(mpc8xxx_gpio_ids, np);
+ if (id)
+ mpc8xxx_gc->of_dev_id_data = id->data;
+
mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
/* ack and mask all irqs */
@@ -321,13 +386,7 @@ static int __init mpc8xxx_add_gpiochips(void)
{
struct device_node *np;
- for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
- mpc8xxx_add_controller(np);
-
- for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
- mpc8xxx_add_controller(np);
-
- for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
+ for_each_matching_node(np, mpc8xxx_gpio_ids)
mpc8xxx_add_controller(np);
for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 00000000000..73b86cc5ea7
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
+/*
+ * PowerPC 4xx Clock and Power Management
+ *
+ * Copyright (C) 2010, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@apm.com)
+ *
+ * Based on arch/powerpc/platforms/44x/idle.c:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Copyright 2008 IBM Corp.
+ *
+ * Based on arch/powerpc/sysdev/fsl_pmc.c:
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Copyright 2009 MontaVista Software, Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/suspend.h>
+#include <asm/dcr.h>
+#include <asm/dcr-native.h>
+#include <asm/machdep.h>
+
+#define CPM_ER 0
+#define CPM_FR 1
+#define CPM_SR 2
+
+#define CPM_IDLE_WAIT 0
+#define CPM_IDLE_DOZE 1
+
+struct cpm {
+ dcr_host_t dcr_host;
+ unsigned int dcr_offset[3];
+ unsigned int powersave_off;
+ unsigned int unused;
+ unsigned int idle_doze;
+ unsigned int standby;
+ unsigned int suspend;
+};
+
+static struct cpm cpm;
+
+struct cpm_idle_mode {
+ unsigned int enabled;
+ const char *name;
+};
+
+static struct cpm_idle_mode idle_mode[] = {
+ [CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
+ [CPM_IDLE_DOZE] = { 0, "doze" },
+};
+
+static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
+{
+ unsigned int value;
+
+	/* The CPM controller supports 3 different types of sleep interface,
+	 * known as class 1, 2 and 3. Class 1 units are
+	 * unconditionally put to sleep when the corresponding CPM bit is
+	 * set. For class 2 and 3 units this is not the case; if they can
+	 * be put to sleep, they will be. Here we do not verify, we just
+	 * set them and expect them to eventually go off when they can.
+ */
+ value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
+ dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
+
+ /* return old state, to restore later if needed */
+ return value;
+}
+
+static void cpm_idle_wait(void)
+{
+ unsigned long msr_save;
+
+ /* save off initial state */
+ msr_save = mfmsr();
+ /* sync required when CPM0_ER[CPU] is set */
+ mb();
+ /* set wait state MSR */
+ mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
+ isync();
+ /* return to initial state */
+ mtmsr(msr_save);
+ isync();
+}
+
+static void cpm_idle_sleep(unsigned int mask)
+{
+ unsigned int er_save;
+
+ /* update CPM_ER state */
+ er_save = cpm_set(CPM_ER, mask);
+
+ /* go to wait state so that CPM0_ER[CPU] can take effect */
+ cpm_idle_wait();
+
+ /* restore CPM_ER state */
+ dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
+}
+
+static void cpm_idle_doze(void)
+{
+ cpm_idle_sleep(cpm.idle_doze);
+}
+
+static void cpm_idle_config(int mode)
+{
+ int i;
+
+ if (idle_mode[mode].enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
+ idle_mode[i].enabled = 0;
+
+ idle_mode[mode].enabled = 1;
+}
+
+static ssize_t cpm_idle_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+ if (idle_mode[i].enabled)
+ s += sprintf(s, "[%s] ", idle_mode[i].name);
+ else
+ s += sprintf(s, "%s ", idle_mode[i].name);
+ }
+
+ *(s-1) = '\n'; /* convert the last space to a newline */
+
+ return s - buf;
+}
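The show routine above renders the available idle modes as a space-separated list with the active one bracketed, e.g. "[wait] doze". A standalone sketch of just that formatting, with an illustrative mode table and buffer size:

/* Formatting a mode list with the active entry bracketed. */
#include <stdio.h>

struct mode { int enabled; const char *name; };

int main(void)
{
	struct mode modes[] = { { 1, "wait" }, { 0, "doze" } };
	char buf[64], *s = buf;
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (modes[i].enabled)
			s += sprintf(s, "[%s] ", modes[i].name);
		else
			s += sprintf(s, "%s ", modes[i].name);
	}
	*(s - 1) = '\n';	/* convert the trailing space to a newline */
	fputs(buf, stdout);	/* prints "[wait] doze" */
	return 0;
}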
+
+static ssize_t cpm_idle_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int i;
+ char *p;
+ int len;
+
+ p = memchr(buf, '\n', n);
+ len = p ? p - buf : n;
+
+ for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+ if (strncmp(buf, idle_mode[i].name, len) == 0) {
+ cpm_idle_config(i);
+ return n;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct kobj_attribute cpm_idle_attr =
+ __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
+
+static void cpm_idle_config_sysfs(void)
+{
+ struct sys_device *sys_dev;
+ unsigned long ret;
+
+ sys_dev = get_cpu_sysdev(0);
+
+ ret = sysfs_create_file(&sys_dev->kobj,
+ &cpm_idle_attr.attr);
+ if (ret)
+ printk(KERN_WARNING
+ "cpm: failed to create idle sysfs entry\n");
+}
+
+static void cpm_idle(void)
+{
+ if (idle_mode[CPM_IDLE_DOZE].enabled)
+ cpm_idle_doze();
+ else
+ cpm_idle_wait();
+}
+
+static int cpm_suspend_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return !!cpm.standby;
+ case PM_SUSPEND_MEM:
+ return !!cpm.suspend;
+ default:
+ return 0;
+ }
+}
+
+static void cpm_suspend_standby(unsigned int mask)
+{
+ unsigned long tcr_save;
+
+ /* disable decrement interrupt */
+ tcr_save = mfspr(SPRN_TCR);
+ mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
+
+ /* go to sleep state */
+ cpm_idle_sleep(mask);
+
+ /* restore decrement interrupt */
+ mtspr(SPRN_TCR, tcr_save);
+}
+
+static int cpm_suspend_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ cpm_suspend_standby(cpm.standby);
+ break;
+ case PM_SUSPEND_MEM:
+ cpm_suspend_standby(cpm.suspend);
+ break;
+ }
+
+ return 0;
+}
+
+static struct platform_suspend_ops cpm_suspend_ops = {
+ .valid = cpm_suspend_valid,
+ .enter = cpm_suspend_enter,
+};
+
+static int cpm_get_uint_property(struct device_node *np,
+ const char *name)
+{
+ int len;
+ const unsigned int *prop = of_get_property(np, name, &len);
+
+ if (prop == NULL || len < sizeof(u32))
+ return 0;
+
+ return *prop;
+}
+
+static int __init cpm_init(void)
+{
+ struct device_node *np;
+ int dcr_base, dcr_len;
+ int ret = 0;
+
+ if (!cpm.powersave_off) {
+ cpm_idle_config(CPM_IDLE_WAIT);
+ ppc_md.power_save = &cpm_idle;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
+ if (!np) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dcr_base = dcr_resource_start(np, 0);
+ dcr_len = dcr_resource_len(np, 0);
+
+ if (dcr_base == 0 || dcr_len == 0) {
+ printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
+ np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+ if (!DCR_MAP_OK(cpm.dcr_host)) {
+ printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
+ np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* All 4xx SoCs with a CPM controller use one of two
+	 * different orderings for the CPM registers. Some have
+	 * the registers in the order (ER,FR,SR), while the others
+	 * have them in the order (SR,ER,FR).
+ */
+
+ if (cpm_get_uint_property(np, "er-offset") == 0) {
+ cpm.dcr_offset[CPM_ER] = 0;
+ cpm.dcr_offset[CPM_FR] = 1;
+ cpm.dcr_offset[CPM_SR] = 2;
+ } else {
+ cpm.dcr_offset[CPM_ER] = 1;
+ cpm.dcr_offset[CPM_FR] = 2;
+ cpm.dcr_offset[CPM_SR] = 0;
+ }
+
+ /* Now let's see what IPs to turn off for the following modes */
+
+ cpm.unused = cpm_get_uint_property(np, "unused-units");
+ cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
+ cpm.standby = cpm_get_uint_property(np, "standby");
+ cpm.suspend = cpm_get_uint_property(np, "suspend");
+
+ /* If some IPs are unused let's turn them off now */
+
+ if (cpm.unused) {
+ cpm_set(CPM_ER, cpm.unused);
+ cpm_set(CPM_FR, cpm.unused);
+ }
+
+ /* Now let's export interfaces */
+
+ if (!cpm.powersave_off && cpm.idle_doze)
+ cpm_idle_config_sysfs();
+
+ if (cpm.standby || cpm.suspend)
+ suspend_set_ops(&cpm_suspend_ops);
+out:
+ if (np)
+ of_node_put(np);
+ return ret;
+}
+
+late_initcall(cpm_init);
+
+static int __init cpm_powersave_off(char *arg)
+{
+ cpm.powersave_off = 1;
+ return 0;
+}
+__setup("powersave=off", cpm_powersave_off);
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index c2d675b6392..ee056807b52 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -84,8 +84,8 @@ static int __init tsi108_eth_of_init(void)
memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
ret = of_address_to_resource(np, 0, &r[0]);
- DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
- __func__,r[0].name, r[0].start, r[0].end);
+ DBG("%s: name:start->end = %s:%pR\n",
+ __func__, r[0].name, &r[0]);
if (ret)
goto err;
@@ -93,8 +93,8 @@ static int __init tsi108_eth_of_init(void)
r[1].start = irq_of_parse_and_map(np, 0);
r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
- DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
- __func__,r[1].name, r[1].start, r[1].end);
+ DBG("%s: name:start->end = %s:%pR\n",
+ __func__, r[1].name, &r[1]);
tsi_eth_dev =
platform_device_register_simple("tsi-ethernet", i++, &r[0],
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 07ea908c510..3e5fc3bbf3e 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -14,6 +14,8 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
#include <linux/delay.h>
@@ -430,11 +432,18 @@ static struct resource sdhi0_cn3_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
+};
+
static struct platform_device sdhi0_cn3_device = {
.name = "sh_mobile_sdhi",
.id = 0, /* "sdhi0" clock */
.num_resources = ARRAY_SIZE(sdhi0_cn3_resources),
.resource = sdhi0_cn3_resources,
+ .dev = {
+ .platform_data = &sdhi0_cn3_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI0,
},
@@ -453,11 +462,18 @@ static struct resource sdhi1_cn7_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
+};
+
static struct platform_device sdhi1_cn7_device = {
.name = "sh_mobile_sdhi",
.id = 1, /* "sdhi1" clock */
.num_resources = ARRAY_SIZE(sdhi1_cn7_resources),
.resource = sdhi1_cn7_resources,
+ .dev = {
+ .platform_data = &sdhi1_cn7_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI1,
},
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index f48c492a68d..33b662999fc 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -473,6 +473,7 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
.set_pwr = sdhi0_set_pwr,
+ .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
};
static struct resource sdhi0_resources[] = {
@@ -511,6 +512,7 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state)
static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
+ .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
.set_pwr = sdhi1_set_pwr,
};
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 9b60eaabf8f..7504daaa85d 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h>
@@ -366,6 +367,7 @@ static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
};
static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index c8acfec9869..03a7ffe729d 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -13,6 +13,7 @@
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
@@ -410,6 +411,7 @@ static struct resource sdhi_cn9_resources[] = {
static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527a0cd956b..6dc7407f2cd 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/smc91x.h>
@@ -467,6 +468,7 @@ static struct resource sdhi0_cn7_resources[] = {
static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi0_cn7_device = {
@@ -498,6 +500,7 @@ static struct resource sdhi1_cn8_resources[] = {
static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
+ .tmio_caps = MMC_CAP_SDIO_IRQ,
};
static struct platform_device sdhi1_cn8_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 3681cafdb4a..b8e5bc80aa4 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -360,6 +360,8 @@ void __init plat_early_device_setup(void)
enum {
UNUSED = 0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -375,15 +377,13 @@ enum {
I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
- IRDA,
- SDHI0, SDHI1, SDHI2, SDHI3,
- CMT, TSIF, SIU,
+ IRDA, SDHI, CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
JPU, LCDC,
/* interrupt groups */
- DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, SDHI, USB,
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
};
static struct intc_vect vectors[] __initdata = {
@@ -412,8 +412,8 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
- INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
- INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -431,7 +431,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
- INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
INTC_GROUP(USB, USBI0, USBI1),
};
@@ -452,7 +451,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USBI1, USBI0 } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -488,9 +487,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7343", vectors, groups,
- mask_registers, prio_registers, sense_registers,
- ack_registers);
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7343",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 8dab9e1bbd8..9b3a6aa9081 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -319,6 +319,8 @@ void __init plat_early_device_setup(void)
enum {
UNUSED=0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -332,14 +334,13 @@ enum {
DENC, MSIOF,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
- SDHI0, SDHI1, SDHI2, SDHI3,
- CMT, TSIF, SIU,
+ SDHI, CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
VEU2, LCDC,
/* interrupt groups */
- DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
};
static struct intc_vect vectors[] __initdata = {
@@ -364,8 +365,8 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
- INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
- INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -381,7 +382,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
- INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -403,7 +403,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -441,9 +441,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
- mask_registers, prio_registers, sense_registers,
- ack_registers);
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7366",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index d551ed8dea9..a164f892425 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -699,7 +699,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 0eadefdbbba..d7641221ee4 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -719,7 +719,7 @@ static struct intc_group groups[] __initdata = {
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
- 0, DISABLED, ENABLED, ENABLED } },
+ 0, ENABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -736,7 +736,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { 0, DISABLED, ENABLED, ENABLED,
+ { 0, ENABLED, ENABLED, ENABLED,
0, 0, SCIFA_SCIFA2, SIU_SIUI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 828c9657eb5..c598a7f61b7 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -1144,7 +1144,7 @@ static struct intc_group groups[] __initdata = {
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
- 0, DISABLED, ENABLED, ENABLED } },
+ 0, ENABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -1166,7 +1166,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { DISABLED, DISABLED, ENABLED, ENABLED,
+ { DISABLED, ENABLED, ENABLED, ENABLED,
0, 0, SCIFA5, FSI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938d39c..0dc714fa23d 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -375,5 +375,5 @@ static int __init cpu_type_probe(void)
return 0;
}
-arch_initcall(cpu_type_probe);
+early_initcall(cpu_type_probe);
#endif
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index b87873c0e8e..ae96cf52a95 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -168,4 +168,4 @@ out_unregister:
return err;
}
-arch_initcall(pcr_arch_init);
+early_initcall(pcr_arch_init);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b5e31..211ca3f7fd1 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -185,17 +185,16 @@ struct bootnode;
#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+ unsigned long end);
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
- int num_nodes)
-{
-}
#endif
+#endif /* CONFIG_ACPI_NUMA */
#define acpi_unlazy_tlb(x) leave_mm(x)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d655d..64dc82ee19f 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
#include <linux/pci.h>
+struct amd_nb_bus_dev_range {
+ u8 bus;
+ u8 dev_base;
+ u8 dev_limit;
+};
+
extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
struct bootnode;
extern int early_is_amd_nb(u32 value);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int amd_scan_nodes(void);
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
struct amd_northbridge {
struct pci_dev *misc;
};
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b234406..4729b2b6311 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@ enum fixed_addresses {
#endif
FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
- __end_of_permanent_fixed_addresses,
-
#ifdef CONFIG_X86_MRST
FIX_LNW_VRTC,
#endif
+ __end_of_permanent_fixed_addresses,
+
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdfa50f..91d915a6525 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
return __gpio_cansleep(gpio);
}
-/*
- * Not implemented, yet.
- */
static inline int gpio_to_irq(unsigned int gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb252846..ca242d35e87 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@ enum die_val {
DIE_TRAP,
DIE_GPF,
DIE_CALL,
- DIE_NMI_IPI,
DIE_PAGE_FAULT,
DIE_NMIUNKNOWN,
};
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e47..72a8b52e7df 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
#include <asm/mc146818rtc.h>
+#define NMI_REASON_PORT 0x61
+
+#define NMI_REASON_SERR 0x80
+#define NMI_REASON_IOCHK 0x40
+#define NMI_REASON_MASK (NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR 0x04
+#define NMI_REASON_CLEAR_IOCHK 0x08
+#define NMI_REASON_CLEAR_MASK 0x0f
+
static inline unsigned char get_nmi_reason(void)
{
- return inb(0x61);
+ return inb(NMI_REASON_PORT);
}
static inline void reassert_nmi(void)
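The NMI_REASON_* constants above give names to the raw port-0x61 bits that the traps.c hunk further down switches over. As a hedged, user-space sketch of how a reason byte decodes under these masks (the constants are copied from the hunk; the sample value is invented for illustration):

#include <stdio.h>

#define NMI_REASON_SERR		0x80
#define NMI_REASON_IOCHK	0x40
#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)

int main(void)
{
	unsigned char reason = 0x80;	/* as if returned by get_nmi_reason() */

	if (reason & NMI_REASON_SERR)
		printf("PCI SERR: system error reported on the bus\n");
	else if (reason & NMI_REASON_IOCHK)
		printf("IOCHK: I/O channel check\n");

	if (!(reason & NMI_REASON_MASK))
		printf("no system NMI reason bits set\n");
	return 0;
}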
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b95351..c76f5b92b84 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@ void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with. Using 4 bits to separate
+ * the priorities. This can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT 16 /* randomly picked */
+#define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR (1ULL << 8)
+#define NMI_NORMAL_PRIOR (1ULL << 4)
+#define NMI_LOW_PRIOR (1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+
void stop_nmi(void);
void restart_nmi(void);
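For orientation, a hedged sketch of how a subsystem would consume these constants when registering on the die notifier chain; the handler and its name are illustrative, not part of the patch (the real consumers are the notifier hunks further down):

#include <linux/kdebug.h>	/* register_die_notifier() */
#include <linux/notifier.h>
#include <asm/nmi.h>		/* NMI_LOCAL_LOW_PRIOR */

/* illustrative handler: claims no NMIs, defers to everyone else */
static int example_nmi_notify(struct notifier_block *self,
			      unsigned long cmd, void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_nmi_nb = {
	.notifier_call	= example_nmi_notify,
	/* local-CPU consumers sort ahead of external-reason handlers */
	.priority	= NMI_LOCAL_LOW_PRIOR,
};

static int __init example_nmi_init(void)
{
	return register_die_notifier(&example_nmi_nb);
}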
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070e7c2..5ae87285a50 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,7 +38,7 @@ extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE ((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
#endif /* CONFIG_NUMA_EMU */
#else
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff18a6..e2f6a99f14a 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,9 @@
#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
#define ARCH_P4_MAX_CCCR (18)
+#define ARCH_P4_CNTRVAL_BITS (40)
+#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+
#define P4_ESCR_EVENT_MASK 0x7e000000U
#define P4_ESCR_EVENT_SHIFT 25
#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
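The two new macros centralize the P4's 40-bit counter geometry so the p4_pmu definition further down can derive cntval_mask and max_period from one place. A quick stand-alone check of the arithmetic (user-space, for illustration only):

#include <stdio.h>

#define ARCH_P4_CNTRVAL_BITS	(40)
#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)

int main(void)
{
	/* mask covers all 40 counter bits; max_period keeps the top bit free */
	printf("cntval_mask = %#llx\n", ARCH_P4_CNTRVAL_MASK);	/* 0xffffffffff */
	printf("max_period  = %#llx\n",
	       (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1);	/* 0x7fffffffff */
	return 0;
}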
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5e006..0a99f7198bc 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@ struct pci_device_id amd_nb_misc_ids[] = {
};
EXPORT_SYMBOL(amd_nb_misc_ids);
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+ { 0x00, 0x18, 0x20 },
+ { 0xff, 0x00, 0x20 },
+ { 0xfe, 0x00, 0x20 },
+ { }
+};
+
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
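Note the empty { } entry terminating amd_nb_bus_dev_ranges[]: it zero-fills dev_limit, so consumers in other translation units can iterate without knowing ARRAY_SIZE(). A hedged sketch of the intended idiom (the function name is illustrative):

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/amd_nb.h>

static void __init example_walk_nb_ranges(void)
{
	int i;

	/* the all-zero terminator makes dev_limit read back as 0 */
	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++)
		pr_debug("NB range: bus %#x, devices %#x-%#x\n",
			 amd_nb_bus_dev_ranges[i].bus,
			 amd_nb_bus_dev_ranges[i].dev_base,
			 amd_nb_bus_dev_ranges[i].dev_limit - 1);
}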
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83e165..5955a7800a9 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
-struct bus_dev_range {
- int bus;
- int dev_base;
- int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
- { 0x00, 0x18, 0x20},
- { 0xff, 0x00, 0x20},
- { 0xfe, 0x00, 0x20}
-};
-
static struct resource gart_resource = {
.name = "GART",
.flags = IORESOURCE_MEM,
@@ -294,13 +282,13 @@ void __init early_gart_iommu_check(void)
search_agp_bridge(&agp_aper_order, &valid_agp);
fix = 0;
- for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
- bus = bus_dev_ranges[i].bus;
- dev_base = bus_dev_ranges[i].dev_base;
- dev_limit = bus_dev_ranges[i].dev_limit;
+ bus = amd_nb_bus_dev_ranges[i].bus;
+ dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+ dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@ void __init early_gart_iommu_check(void)
return;
/* disable them all at first */
- for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
- bus = bus_dev_ranges[i].bus;
- dev_base = bus_dev_ranges[i].dev_base;
- dev_limit = bus_dev_ranges[i].dev_limit;
+ bus = amd_nb_bus_dev_ranges[i].bus;
+ dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+ dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@ int __init gart_iommu_hole_init(void)
fix = 0;
node = 0;
- for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
u32 ctl;
- bus = bus_dev_ranges[i].bus;
- dev_base = bus_dev_ranges[i].dev_base;
- dev_limit = bus_dev_ranges[i].dev_limit;
+ bus = amd_nb_bus_dev_ranges[i].bus;
+ dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+ dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@ out:
}
/* Fix up the north bridges */
- for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
/*
@@ -514,9 +502,9 @@ out:
*/
u32 ctl = DISTLBWALKPRB | aper_order << 1;
- bus = bus_dev_ranges[i].bus;
- dev_base = bus_dev_ranges[i].dev_base;
- dev_limit = bus_dev_ranges[i].dev_limit;
+ bus = amd_nb_bus_dev_ranges[i].bus;
+ dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+ dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a51345ba449..06c196d7e59 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -684,7 +684,7 @@ static int __init calibrate_APIC_clock(void)
lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+ clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e1ae0..79fd43ca6f9 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
switch (cmd) {
case DIE_NMI:
- case DIE_NMI_IPI:
break;
default:
@@ -96,7 +95,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
static __read_mostly struct notifier_block backtrace_notifier = {
.notifier_call = arch_trigger_all_cpu_backtrace_handler,
.next = NULL,
- .priority = 1
+ .priority = NMI_LOCAL_LOW_PRIOR,
};
static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ecca5f41ad2..bd16b58b885 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
- __this_cpu_write(x2apic_extra_bits, (pnode << 6));
+ __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
/*
@@ -641,7 +641,7 @@ void __cpuinit uv_cpu_init(void)
*/
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
{
- if (reason != DIE_NMI_IPI)
+ if (reason != DIE_NMIUNKNOWN)
return NOTIFY_OK;
if (in_crash_kexec)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7bfed..a7797197956 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
#include <linux/gfp.h>
#include <asm/mce.h>
#include <asm/apic.h>
+#include <asm/nmi.h>
/* Update fake mce registers on current CPU. */
static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@ static int mce_raise_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int cpu = smp_processor_id();
struct mce *m = &__get_cpu_var(injectm);
- if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+ if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
return NOTIFY_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@ static int mce_raise_notify(struct notifier_block *self,
static struct notifier_block mce_raise_nb = {
.notifier_call = mce_raise_notify,
- .priority = 1000,
+ .priority = NMI_LOCAL_NORMAL_PRIOR,
};
/* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 04921017abe..9d977a2ea69 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1267,7 +1267,6 @@ perf_event_nmi_handler(struct notifier_block *self,
switch (cmd) {
case DIE_NMI:
- case DIE_NMI_IPI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
@@ -1317,7 +1316,7 @@ perf_event_nmi_handler(struct notifier_block *self,
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
- .priority = 1
+ .priority = NMI_LOCAL_LOW_PRIOR,
};
static struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b93e69..e56b9bfbabd 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -753,19 +753,21 @@ out:
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
- int overflow = 0;
- u32 low, high;
+ u64 v;
- rdmsr(hwc->config_base + hwc->idx, low, high);
-
- /* we need to check high bit for unflagged overflows */
- if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
- overflow = 1;
- (void)checking_wrmsrl(hwc->config_base + hwc->idx,
- ((u64)low) & ~P4_CCCR_OVF);
+ /* the official way to indicate an overflow */
+ rdmsrl(hwc->config_base + hwc->idx, v);
+ if (v & P4_CCCR_OVF) {
+ wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+ return 1;
}
- return overflow;
+ /* it might be an unflagged overflow */
+ rdmsrl(hwc->event_base + hwc->idx, v);
+ if (!(v & ARCH_P4_CNTRVAL_MASK))
+ return 1;
+
+ return 0;
}
static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1154,9 @@ static __initconst const struct x86_pmu p4_pmu = {
*/
.num_counters = ARCH_P4_MAX_CCCR,
.apic = 1,
- .cntval_bits = 40,
- .cntval_mask = (1ULL << 40) - 1,
- .max_period = (1ULL << 39) - 1,
+ .cntval_bits = ARCH_P4_CNTRVAL_BITS,
+ .cntval_mask = ARCH_P4_CNTRVAL_MASK,
+ .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
.hw_config = p4_hw_config,
.schedule_events = p4_pmu_schedule_events,
/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c998cbd..d6fb146c0d8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
*/
void dump_stack(void)
{
- unsigned long bp = 0;
unsigned long stack;
-#ifdef CONFIG_FRAME_POINTER
- if (!bp)
- get_bp(bp);
-#endif
-
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
current->pid, current->comm, print_tainted(),
init_utsname()->release,
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417e869..d3b895f375d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@ ENDPROC(native_usergs_sysret64)
ENTRY(save_args)
XCPT_FRAME
cld
- movq_cfi rdi, RDI+16-ARGOFFSET
- movq_cfi rsi, RSI+16-ARGOFFSET
- movq_cfi rdx, RDX+16-ARGOFFSET
- movq_cfi rcx, RCX+16-ARGOFFSET
- movq_cfi rax, RAX+16-ARGOFFSET
- movq_cfi r8, R8+16-ARGOFFSET
- movq_cfi r9, R9+16-ARGOFFSET
- movq_cfi r10, R10+16-ARGOFFSET
- movq_cfi r11, R11+16-ARGOFFSET
-
- leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
+ /*
+ * start from rbp in pt_regs and jump over
+ * return address.
+ */
+ movq_cfi rdi, RDI+8-RBP
+ movq_cfi rsi, RSI+8-RBP
+ movq_cfi rdx, RDX+8-RBP
+ movq_cfi rcx, RCX+8-RBP
+ movq_cfi rax, RAX+8-RBP
+ movq_cfi r8, R8+8-RBP
+ movq_cfi r9, R9+8-RBP
+ movq_cfi r10, R10+8-RBP
+ movq_cfi r11, R11+8-RBP
+
+ leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
movq_cfi rbp, 8 /* push %rbp */
leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
testl $3, CS(%rdi)
@@ -782,8 +786,9 @@ END(interrupt)
/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
- subq $ORIG_RAX-ARGOFFSET+8, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
+ /* reserve pt_regs for scratch regs and rbp */
+ subq $ORIG_RAX-RBP, %rsp
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
call save_args
PARTIAL_FRAME 0
call \func
@@ -808,9 +813,14 @@ ret_from_intr:
TRACE_IRQS_OFF
decl PER_CPU_VAR(irq_count)
leaveq
+
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
+
+ /* we did not save rbx, restore only from ARGOFFSET */
+ addq $8, %rsp
+ CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b654dec..a4130005028 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
#include <asm/apicdef.h>
#include <asm/system.h>
#include <asm/apic.h>
+#include <asm/nmi.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
@@ -525,10 +526,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
}
return NOTIFY_DONE;
- case DIE_NMI_IPI:
- /* Just ignore, we will handle the roundup on DIE_NMI. */
- return NOTIFY_DONE;
-
case DIE_NMIUNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@ static struct notifier_block kgdb_notifier = {
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
- .priority = -INT_MAX,
+ .priority = NMI_LOCAL_LOW_PRIOR,
};
/**
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8d481..fc7aae1e2bc 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
#include <asm/pci_x86.h>
#include <asm/virtext.h>
#include <asm/cpu.h>
+#include <asm/nmi.h>
#ifdef CONFIG_X86_32
# include <linux/ctype.h>
@@ -747,7 +748,7 @@ static int crash_nmi_callback(struct notifier_block *self,
{
int cpu;
- if (val != DIE_NMI_IPI)
+ if (val != DIE_NMI)
return NOTIFY_OK;
cpu = raw_smp_processor_id();
@@ -778,6 +779,8 @@ static void smp_send_nmi_allbutself(void)
static struct notifier_block crash_nmi_nb = {
.notifier_call = crash_nmi_callback,
+ /* we want to be the first one called */
+ .priority = NMI_LOCAL_HIGH_PRIOR+1,
};
/* Halt all other CPUs, calling the specified function on each of them
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c7149c96d07..763df77343d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
*/
static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-void cpu_hotplug_driver_lock()
+void cpu_hotplug_driver_lock(void)
{
mutex_lock(&x86_cpu_hotplug_driver_mutex);
}
-void cpu_hotplug_driver_unlock()
+void cpu_hotplug_driver_unlock(void)
{
mutex_unlock(&x86_cpu_hotplug_driver_mutex);
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca5694..b9b67166f9d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@ EXPORT_SYMBOL_GPL(used_vectors);
static int ignore_nmis;
int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed
+ * simultaneously; may only be used from within the NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static inline void conditional_sti(struct pt_regs *regs)
{
@@ -310,15 +315,15 @@ static int __init setup_unknown_nmi_panic(char *str)
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
- printk(KERN_EMERG
- "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
- reason, smp_processor_id());
-
- printk(KERN_EMERG
- "You have some hardware problem, likely on the PCI bus.\n");
+ pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+ /*
+ * On some machines, the PCI SERR line is used to report memory
+ * errors. EDAC makes use of it.
+ */
#if defined(CONFIG_EDAC)
if (edac_handler_set()) {
edac_atomic_assert_error();
@@ -329,11 +334,11 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
- printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+ pr_emerg("Dazed and confused, but trying to continue\n");
- /* Clear and disable the memory parity error line. */
- reason = (reason & 0xf) | 4;
- outb(reason, 0x61);
+ /* Clear and disable the PCI SERR error line. */
+ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+ outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
@@ -341,15 +346,17 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
- printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ pr_emerg(
+ "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
show_registers(regs);
if (panic_on_io_nmi)
panic("NMI IOCK error: Not continuing");
/* Re-enable the IOCK line, wait for a few seconds */
- reason = (reason & 0xf) | 8;
- outb(reason, 0x61);
+ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+ outb(reason, NMI_REASON_PORT);
i = 20000;
while (--i) {
@@ -357,8 +364,8 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
udelay(100);
}
- reason &= ~8;
- outb(reason, 0x61);
+ reason &= ~NMI_REASON_CLEAR_IOCHK;
+ outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
@@ -377,57 +384,50 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
return;
}
#endif
- printk(KERN_EMERG
- "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
- reason, smp_processor_id());
+ pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
- printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
+ pr_emerg("Do you have a strange power saving mode enabled?\n");
if (unknown_nmi_panic || panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
- printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+ pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
- int cpu;
- cpu = smp_processor_id();
-
- /* Only the BSP gets external NMIs from the system. */
- if (!cpu)
- reason = get_nmi_reason();
+ /*
+ * CPU-specific NMIs must be processed before non-CPU-specific
+ * NMIs, otherwise we may lose them, because a CPU-specific
+ * NMI cannot be detected/processed on other CPUs.
+ */
+ if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+ return;
- if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
- == NOTIFY_STOP)
- return;
+ /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+ raw_spin_lock(&nmi_reason_lock);
+ reason = get_nmi_reason();
-#ifdef CONFIG_X86_LOCAL_APIC
- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
- == NOTIFY_STOP)
- return;
+ if (reason & NMI_REASON_MASK) {
+ if (reason & NMI_REASON_SERR)
+ pci_serr_error(reason, regs);
+ else if (reason & NMI_REASON_IOCHK)
+ io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+ /*
+ * Reassert NMI in case it became active
+ * meanwhile as it's edge-triggered:
+ */
+ reassert_nmi();
#endif
- unknown_nmi_error(reason, regs);
-
+ raw_spin_unlock(&nmi_reason_lock);
return;
}
- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
- return;
+ raw_spin_unlock(&nmi_reason_lock);
- /* AK: following checks seem to be broken on modern chipsets. FIXME */
- if (reason & 0x80)
- mem_parity_error(reason, regs);
- if (reason & 0x40)
- io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
- /*
- * Reassert NMI in case it became active meanwhile
- * as it's edge-triggered:
- */
- reassert_nmi();
-#endif
+ unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 03d2ea82f35..823f79a17ad 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -965,7 +965,7 @@ out:
static int __init init_tsc_clocksource(void)
{
- if (!cpu_has_tsc || tsc_disabled > 0)
+ if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
return 0;
if (tsc_clocksource_reliable)
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 08a0069b87a..f21962c435e 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
#include <asm/amd_nb.h>
static struct bootnode __initdata nodes[8];
+static unsigned char __initdata nodeids[8];
static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
static __init int find_northbridge(void)
@@ -68,19 +69,6 @@ static __init void early_get_boot_cpu_id(void)
#endif
}
-int __init amd_get_nodes(struct bootnode *physnodes)
-{
- int i;
- int ret = 0;
-
- for_each_node_mask(i, nodes_parsed) {
- physnodes[ret].start = nodes[i].start;
- physnodes[ret].end = nodes[i].end;
- ret++;
- }
- return ret;
-}
-
int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long start = PFN_PHYS(start_pfn);
@@ -113,7 +101,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
base = read_pci_config(0, nb, 1, 0x40 + i*8);
limit = read_pci_config(0, nb, 1, 0x44 + i*8);
- nodeid = limit & 7;
+ nodeids[i] = nodeid = limit & 7;
if ((base & 3) == 0) {
if (i < numnodes)
pr_info("Skipping disabled node %d\n", i);
@@ -193,6 +181,76 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
return 0;
}
+#ifdef CONFIG_NUMA_EMU
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+ [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
+void __init amd_get_nodes(struct bootnode *physnodes)
+{
+ int i;
+
+ for_each_node_mask(i, nodes_parsed) {
+ physnodes[i].start = nodes[i].start;
+ physnodes[i].end = nodes[i].end;
+ }
+}
+
+static int __init find_node_by_addr(unsigned long addr)
+{
+ int ret = NUMA_NO_NODE;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (addr >= nodes[i].start && addr < nodes[i].end) {
+ ret = i;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
+ * set up to represent the physical topology but reflect the emulated
+ * environment. For each emulated node, the real node which it appears on is
+ * found and a fake pxm to nid mapping is created which mirrors the actual
+ * locality. node_distance() then represents the correct distances between
+ * emulated nodes by using the fake acpi mappings to pxms.
+ */
+void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
+{
+ unsigned int bits;
+ unsigned int cores;
+ unsigned int apicid_base = 0;
+ int i;
+
+ bits = boot_cpu_data.x86_coreid_bits;
+ cores = 1 << bits;
+ early_get_boot_cpu_id();
+ if (boot_cpu_physical_apicid > 0)
+ apicid_base = boot_cpu_physical_apicid;
+
+ for (i = 0; i < nr_nodes; i++) {
+ int index;
+ int nid;
+ int j;
+
+ nid = find_node_by_addr(nodes[i].start);
+ if (nid == NUMA_NO_NODE)
+ continue;
+
+ index = nodeids[nid] << bits;
+ if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
+ for (j = apicid_base; j < cores + apicid_base; j++)
+ fake_apicid_to_node[index + j] = i;
+#ifdef CONFIG_ACPI_NUMA
+ __acpi_map_pxm_to_node(nid, i);
+#endif
+ }
+ memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+}
+#endif /* CONFIG_NUMA_EMU */
+
int __init amd_scan_nodes(void)
{
unsigned int bits;
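The mapping logic above is easier to see with concrete numbers. A hedged, user-space toy model (all addresses invented) of how each emulated node inherits the proximity of the physical node backing its start address, mirroring what amd_fake_nodes() does via find_node_by_addr():

#include <stdio.h>

struct bootnode { unsigned long start, end; };

static int find_node_by_addr(const struct bootnode *phys, int n,
			     unsigned long addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (addr >= phys[i].start && addr < phys[i].end)
			return i;
	return -1;
}

int main(void)
{
	/* two 1GB physical nodes split into four 512MB emulated nodes */
	struct bootnode phys[] = { { 0x00000000, 0x40000000 },
				   { 0x40000000, 0x80000000 } };
	struct bootnode emu[]  = { { 0x00000000, 0x20000000 },
				   { 0x20000000, 0x40000000 },
				   { 0x40000000, 0x60000000 },
				   { 0x60000000, 0x80000000 } };
	int i;

	for (i = 0; i < 4; i++)
		printf("emulated node %d sits on physical node %d\n",
		       i, find_node_by_addr(phys, 2, emu[i].start));
	return 0;
}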
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7762a517d69..1e72102e80c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -260,30 +260,30 @@ void __init numa_init_array(void)
#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;
static int __init setup_physnodes(unsigned long start, unsigned long end,
int acpi, int amd)
{
- int nr_nodes = 0;
int ret = 0;
int i;
+ memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
if (acpi)
- nr_nodes = acpi_get_nodes(physnodes);
+ acpi_get_nodes(physnodes, start, end);
#endif
#ifdef CONFIG_AMD_NUMA
if (amd)
- nr_nodes = amd_get_nodes(physnodes);
+ amd_get_nodes(physnodes);
#endif
/*
* Basic sanity checking on the physical node map: there may be errors
* if the SRAT or AMD code incorrectly reported the topology or the mem=
* kernel parameter is used.
*/
- for (i = 0; i < nr_nodes; i++) {
+ for (i = 0; i < MAX_NUMNODES; i++) {
if (physnodes[i].start == physnodes[i].end)
continue;
if (physnodes[i].start > end) {
@@ -298,17 +298,6 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
physnodes[i].start = start;
if (physnodes[i].end > end)
physnodes[i].end = end;
- }
-
- /*
- * Remove all nodes that have no memory or were truncated because of the
- * limited address range.
- */
- for (i = 0; i < nr_nodes; i++) {
- if (physnodes[i].start == physnodes[i].end)
- continue;
- physnodes[ret].start = physnodes[i].start;
- physnodes[ret].end = physnodes[i].end;
ret++;
}
@@ -324,6 +313,24 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
return ret;
}
+static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
+{
+ int i;
+
+ BUG_ON(acpi && amd);
+#ifdef CONFIG_ACPI_NUMA
+ if (acpi)
+ acpi_fake_nodes(nodes, nr_nodes);
+#endif
+#ifdef CONFIG_AMD_NUMA
+ if (amd)
+ amd_fake_nodes(nodes, nr_nodes);
+#endif
+ if (!acpi && !amd)
+ for (i = 0; i < nr_cpu_ids; i++)
+ numa_set_node(i, 0);
+}
+
/*
* Setups up nid to range from addr to addr + size. If the end
* boundary is greater than max_addr, then max_addr is used instead.
@@ -352,8 +359,7 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
* Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
* to max_addr. The return value is the number of nodes allocated.
*/
-static int __init split_nodes_interleave(u64 addr, u64 max_addr,
- int nr_phys_nodes, int nr_nodes)
+static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
nodemask_t physnode_mask = NODE_MASK_NONE;
u64 size;
@@ -384,7 +390,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
return -1;
}
- for (i = 0; i < nr_phys_nodes; i++)
+ for (i = 0; i < MAX_NUMNODES; i++)
if (physnodes[i].start != physnodes[i].end)
node_set(i, physnode_mask);
@@ -553,11 +559,9 @@ static int __init numa_emulation(unsigned long start_pfn,
{
u64 addr = start_pfn << PAGE_SHIFT;
u64 max_addr = last_pfn << PAGE_SHIFT;
- int num_phys_nodes;
int num_nodes;
int i;
- num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
/*
* If the numa=fake command-line contains a 'M' or 'G', it represents
* the fixed node size. Otherwise, if it is just a single number N,
@@ -572,7 +576,7 @@ static int __init numa_emulation(unsigned long start_pfn,
unsigned long n;
n = simple_strtoul(cmdline, NULL, 0);
- num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
+ num_nodes = split_nodes_interleave(addr, max_addr, n);
}
if (num_nodes < 0)
@@ -595,7 +599,8 @@ static int __init numa_emulation(unsigned long start_pfn,
nodes[i].end >> PAGE_SHIFT);
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
- acpi_fake_nodes(nodes, num_nodes);
+ setup_physnodes(addr, max_addr, acpi, amd);
+ fake_physnodes(acpi, amd, num_nodes);
numa_init_array();
return 0;
}
@@ -610,8 +615,12 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
nodes_clear(node_online_map);
#ifdef CONFIG_NUMA_EMU
+ setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+ acpi, amd);
if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
return;
+ setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+ acpi, amd);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
#endif
@@ -767,6 +776,7 @@ void __cpuinit numa_clear_node(int cpu)
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+#ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -776,34 +786,115 @@ void __cpuinit numa_remove_cpu(int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
+#else
+void __cpuinit numa_add_cpu(int cpu)
+{
+ unsigned long addr;
+ u16 apicid;
+ int physnid;
+ int nid = NUMA_NO_NODE;
+
+ apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+ if (apicid != BAD_APICID)
+ nid = apicid_to_node[apicid];
+ if (nid == NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
+ BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+ /*
+ * Use the starting address of the emulated node to find which physical
+ * node it is allocated on.
+ */
+ addr = node_start_pfn(nid) << PAGE_SHIFT;
+ for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
+ if (addr >= physnodes[physnid].start &&
+ addr < physnodes[physnid].end)
+ break;
+
+ /*
+ * Map the cpu to each emulated node that is allocated on the physical
+ * node of the cpu's apic id.
+ */
+ for_each_online_node(nid) {
+ addr = node_start_pfn(nid) << PAGE_SHIFT;
+ if (addr >= physnodes[physnid].start &&
+ addr < physnodes[physnid].end)
+ cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+ }
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+ int i;
+
+ for_each_online_node(i)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#endif /* !CONFIG_NUMA_EMU */
#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+{
+ int node = early_cpu_to_node(cpu);
+ struct cpumask *mask;
+ char buf[64];
+
+ mask = node_to_cpumask_map[node];
+ if (!mask) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return NULL;
+ }
+
+ cpulist_scnprintf(buf, sizeof(buf), mask);
+ printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, buf);
+ return mask;
+}
/*
* --------- debug versions of the numa functions ---------
*/
+#ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
- int node = early_cpu_to_node(cpu);
struct cpumask *mask;
- char buf[64];
- mask = node_to_cpumask_map[node];
- if (mask == NULL) {
- printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
- dump_stack();
+ mask = debug_cpumask_set_cpu(cpu, enable);
+ if (!mask)
return;
- }
if (enable)
cpumask_set_cpu(cpu, mask);
else
cpumask_clear_cpu(cpu, mask);
+}
+#else
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+ int node = early_cpu_to_node(cpu);
+ struct cpumask *mask;
+ int i;
- cpulist_scnprintf(buf, sizeof(buf), mask);
- printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
- enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+ for_each_online_node(i) {
+ unsigned long addr;
+
+ addr = node_start_pfn(i) << PAGE_SHIFT;
+ if (addr < physnodes[node].start ||
+ addr >= physnodes[node].end)
+ continue;
+ mask = debug_cpumask_set_cpu(cpu, enable);
+ if (!mask)
+ return;
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+ }
}
+#endif /* CONFIG_NUMA_EMU */
void __cpuinit numa_add_cpu(int cpu)
{
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 171a0aacb99..603d285d1da 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -349,18 +349,19 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
void __init acpi_numa_arch_fixup(void) {}
-int __init acpi_get_nodes(struct bootnode *physnodes)
+#ifdef CONFIG_NUMA_EMU
+void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+ unsigned long end)
{
int i;
- int ret = 0;
for_each_node_mask(i, nodes_parsed) {
- physnodes[ret].start = nodes[i].start;
- physnodes[ret].end = nodes[i].end;
- ret++;
+ cutoff_node(i, start, end);
+ physnodes[i].start = nodes[i].start;
+ physnodes[i].end = nodes[i].end;
}
- return ret;
}
+#endif /* CONFIG_NUMA_EMU */
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
@@ -505,8 +506,6 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
int i, j;
- printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
- "topology.\n");
for (i = 0; i < num_nodes; i++) {
int nid, pxm;
@@ -526,6 +525,17 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i;
}
+
+ /*
+ * If there are apicid-to-node mappings for physical nodes that do not
+ * have a corresponding emulated node, they should default to a guaranteed
+ * value.
+ */
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ if (apicid_to_node[i] != NUMA_NO_NODE &&
+ fake_apicid_to_node[i] == NUMA_NO_NODE)
+ fake_apicid_to_node[i] = 0;
+
for (i = 0; i < num_nodes; i++)
__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f24a8533bcd..e2b7b0c06cd 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -65,7 +65,6 @@ static int profile_exceptions_notify(struct notifier_block *self,
switch (val) {
case DIE_NMI:
- case DIE_NMI_IPI:
if (ctr_running)
model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
else if (!nmi_enabled)
@@ -361,7 +360,7 @@ static void nmi_cpu_setup(void *dummy)
static struct notifier_block profile_exceptions_nb = {
.notifier_call = profile_exceptions_notify,
.next = NULL,
- .priority = 2
+ .priority = NMI_LOCAL_LOW_PRIOR,
};
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 0636dd93cef..720bf5a53c5 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -38,7 +38,7 @@ static int profile_timer_exceptions_notify(struct notifier_block *self,
static struct notifier_block profile_timer_exceptions_nb = {
.notifier_call = profile_timer_exceptions_notify,
.next = NULL,
- .priority = 0
+ .priority = NMI_LOW_PRIOR,
};
static int timer_start(void)
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe07e5..e27dffbbb1a 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@
#include <linux/cpu.h>
#include <linux/range.h>
+#include <asm/amd_nb.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>
@@ -378,6 +379,34 @@ static struct notifier_block __cpuinitdata amd_cpu_notifier = {
.notifier_call = amd_cpu_notify,
};
+static void __init pci_enable_pci_io_ecs(void)
+{
+#ifdef CONFIG_AMD_NB
+ unsigned int i, n;
+
+ for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
+ u8 bus = amd_nb_bus_dev_ranges[i].bus;
+ u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
+ u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
+
+ for (; slot < limit; ++slot) {
+ u32 val = read_pci_config(bus, slot, 3, 0);
+
+ if (!early_is_amd_nb(val))
+ continue;
+
+ val = read_pci_config(bus, slot, 3, 0x8c);
+ if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
+ val |= ENABLE_CF8_EXT_CFG >> 32;
+ write_pci_config(bus, slot, 3, 0x8c, val);
+ }
+ ++n;
+ }
+ }
+ pr_info("Extended Config Space enabled on %u nodes\n", n);
+#endif
+}
+
static int __init pci_io_ecs_init(void)
{
int cpu;
@@ -386,6 +415,10 @@ static int __init pci_io_ecs_init(void)
if (boot_cpu_data.x86 < 0x10)
return 0;
+ /* Try the PCI method first. */
+ if (early_pci_allowed())
+ pci_enable_pci_io_ecs();
+
register_cpu_notifier(&amd_cpu_notifier);
for_each_online_cpu(cpu)
amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b655292..9f47e862526 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
+ const char *errmsg = NULL;
int res;
-
+
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
if (res) {
PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
/* First record contains just the start address */
rec = (const struct ihex_binrec *)fw->data;
if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
- PRINTK (KERN_ERR, "Bad microcode data (no start record)");
- return -EINVAL;
+ errmsg = "no start record";
+ goto fail;
}
start_address = be32_to_cpup((__be32 *)rec->data);
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
be16_to_cpu(rec->len));
if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
- PRINTK (KERN_ERR, "Bad microcode data (record too long)");
- return -EINVAL;
+ errmsg = "record too long";
+ goto fail;
}
if (be16_to_cpu(rec->len) & 3) {
- PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
- return -EINVAL;
+ errmsg = "odd number of bytes";
+ goto fail;
}
res = loader_write(lb, dev, rec);
if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
res = loader_start(lb, dev, start_address);
return res;
+fail:
+ release_firmware(fw);
+ PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+ return -EINVAL;
}
/********** give adapter parameters **********/
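The rewrite above is the usual single-exit cleanup idiom: record what went wrong, jump to one label that drops the firmware reference and prints the message. A self-contained, hedged sketch of the same shape (all names invented stand-ins, not the driver's API):

#include <stdio.h>

struct firmware_stub { int dummy; };	/* stand-in for the real handle */
static void release_firmware_stub(const struct firmware_stub *fw) { (void)fw; }

static int example_ucode_init(const struct firmware_stub *fw,
			      int header_ok, int len_ok)
{
	const char *errmsg = NULL;

	if (!header_ok) {
		errmsg = "no start record";
		goto fail;
	}
	if (!len_ok) {
		errmsg = "record too long";
		goto fail;
	}
	return 0;

fail:
	/* one exit point owns both the cleanup and the message */
	release_firmware_stub(fw);
	fprintf(stderr, "Bad microcode data (%s)\n", errmsg);
	return -1;
}

int main(void)
{
	struct firmware_stub fw = { 0 };

	return example_ucode_init(&fw, 1, 0) ? 1 : 0;
}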
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065f3c7..3e67ddde9e1 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
- void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a69cd..a48e05b3159 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case AGPIOC_CHIPSET_FLUSH32:
- ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678ac037..f30e0fd9796 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539a98b..2e044338753 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
- DBG("");
- agp_flush_chipset(agp_bridge);
- return 0;
-}
-
static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
break;
case AGPIOC_CHIPSET_FLUSH:
- ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c8f9d..012cba0d6d9 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@ static int agp_get_key(void)
return -1;
}
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
- if (bridge->driver->chipset_flush)
- bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
}
EXPORT_SYMBOL(agp_unbind_memory);
-/**
- * agp_rebind_emmory - Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
- struct agp_memory *curr;
- int ret_val = 0;
-
- spin_lock(&agp_bridge->mapped_lock);
- list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
- ret_val = curr->bridge->driver->insert_memory(curr,
- curr->pg_start,
- curr->type);
- if (ret_val != 0)
- break;
- }
- spin_unlock(&agp_bridge->mapped_lock);
- return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
/* End - Routines for handling swapping of agp_memory into the GATT */
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d5220..07e9796fead 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
- int ret_val;
bridge->driver->configure();
- ret_val = agp_rebind_memory();
- if (ret_val != 0)
- return ret_val;
-
return 0;
}
#endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df0250..010e3defd6c 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2 0x20c4
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
@@ -82,9 +84,15 @@
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK (3 << 8)
-#define G33_PGETBL_SIZE_1M (1 << 8)
-#define G33_PGETBL_SIZE_2M (2 << 8)
+#define G33_GMCH_SIZE_MASK (3 << 8)
+#define G33_GMCH_SIZE_1M (1 << 8)
+#define G33_GMCH_SIZE_2M (2 << 8)
+#define G4x_GMCH_SIZE_MASK (0xf << 8)
+#define G4x_GMCH_SIZE_1M (0x1 << 8)
+#define G4x_GMCH_SIZE_2M (0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 29ac6d499fa..356f73e0d17 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>
/*
@@ -39,40 +38,12 @@
#define USE_PCI_DMA_API 0
#endif
-/* Max amount of stolen space, anything above will be returned to Linux */
-int intel_max_stolen = 32 * 1024 * 1024;
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
- .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-#define INTEL_AGP_UNCACHED_MEMORY 0
-#define INTEL_AGP_CACHED_MEMORY_LLC 1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-
struct intel_gtt_driver {
unsigned int gen : 8;
unsigned int is_g33 : 1;
unsigned int is_pineview : 1;
unsigned int is_ironlake : 1;
+ unsigned int has_pgtbl_enable : 1;
unsigned int dma_mask_size : 8;
/* Chipset specific GTT setup */
int (*setup)(void);
@@ -95,13 +66,14 @@ static struct _intel_private {
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
phys_addr_t gma_bus_addr;
- phys_addr_t pte_bus_addr;
+ u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
int num_dcache_entries;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
};
+ char *i81x_gtt_table;
struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
@@ -113,42 +85,31 @@ static struct _intel_private {
#define IS_G33 intel_private.driver->is_g33
#define IS_PINEVIEW intel_private.driver->is_pineview
#define IS_IRONLAKE intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
- struct sg_table st;
-
- st.sgl = mem->sg_list;
- st.orig_nents = st.nents = mem->page_count;
-
- sg_free_table(&st);
-
- mem->sg_list = NULL;
- mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+ struct scatterlist **sg_list, int *num_sg)
{
struct sg_table st;
struct scatterlist *sg;
int i;
- if (mem->sg_list)
+ if (*sg_list)
return 0; /* already mapped (e.g. for resume) */
- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+ DBG("try mapping %lu pages\n", (unsigned long)num_entries);
- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
goto err;
- mem->sg_list = sg = st.sgl;
+ *sg_list = sg = st.sgl;
- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+ for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg))
+ *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+ num_entries, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!*num_sg))
goto err;
return 0;
@@ -157,90 +118,22 @@ err:
sg_free_table(&st);
return -ENOMEM;
}
+EXPORT_SYMBOL(intel_gtt_map_memory);
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
+ struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
- pci_unmap_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- intel_agp_free_sglist(mem);
-}
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- struct aper_size_info_fixed *values;
-
- pci_read_config_dword(intel_private.bridge_dev,
- I810_SMRAM_MISCC, &smram_miscc);
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge->current_size = (void *) (values);
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
+ pci_unmap_sg(intel_private.pcidev, sg_list,
+ num_sg, PCI_DMA_BIDIRECTIONAL);
- if (!intel_private.registers) {
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
+ st.sgl = sg_list;
+ st.orig_nents = st.nents = num_sg;
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- dev_err(&intel_private.pcidev->dev,
- "can't remap memory\n");
- return -ENOMEM;
- }
- }
-
- if ((readl(intel_private.registers+I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- dev_info(&intel_private.pcidev->dev,
- "detected 4MB dedicated video ram\n");
- intel_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = 0; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
- }
- global_cache_flush();
- return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
- writel(0, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers); /* PCI Posting. */
- iounmap(intel_private.registers);
+ sg_free_table(&st);
}
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
@@ -277,80 +170,64 @@ static void i8xx_destroy_pages(struct page *page)
atomic_dec(&agp_bridge->current_memory_agp);
}
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
+ u32 reg_addr;
+ char *gtt_table;
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
+ /* i81x does not preallocate the gtt. It's always 64kb in size. */
+ gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+ if (gtt_table == NULL)
+ return -ENOMEM;
+ intel_private.i81x_gtt_table = gtt_table;
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
- ret = -EBUSY;
- goto out_err;
- }
- }
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
+ return -ENOMEM;
- if (type != mem->type)
- goto out_err;
+ writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- switch (mask_type) {
- case AGP_DCACHE_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
- intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
- break;
- case AGP_PHYS_MEMORY:
- case AGP_NORMAL_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- break;
- default:
- goto out_err;
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
}
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
+ return 0;
}
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
- if (mem->page_count == 0)
- return 0;
+ if ((pg_start + mem->page_count)
+ > intel_private.num_dcache_entries)
+ return -EINVAL;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ dma_addr_t addr = i << PAGE_SHIFT;
+ intel_private.driver->write_entry(addr,
+ i, type);
}
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ readl(intel_private.gtt+i-1);
return 0;
}
@@ -397,29 +274,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
return new;
}
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- struct agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_private.num_dcache_entries)
- return NULL;
-
- new = agp_create_memory(1);
- if (new == NULL)
- return NULL;
-
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- agp_free_page_array(new);
- return new;
- }
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- return NULL;
-}
-
static void intel_i810_free_by_type(struct agp_memory *curr)
{
agp_free_key(curr->key);
@@ -437,13 +291,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
kfree(curr);
}
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
static int intel_gtt_setup_scratch_page(void)
{
struct page *page;
@@ -455,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ if (intel_private.base.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@ static int intel_gtt_setup_scratch_page(void)
return 0;
}
-static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
+
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {32, 8192, 3},
+ {64, 16384, 4},
{128, 32768, 5},
- /* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
{256, 65536, 6},
{512, 131072, 7},
};
-static unsigned int intel_gtt_stolen_entries(void)
+static unsigned int intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- unsigned int overhead_entries, stolen_entries;
unsigned int stolen_size = 0;
+ if (INTEL_GTT_GEN == 1)
+ return 0; /* no stolen mem on i81x */
+
pci_read_config_word(intel_private.bridge_dev,
I830_GMCH_CTRL, &gmch_ctrl);
- if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
- overhead_entries = 0;
- else
- overhead_entries = intel_private.base.gtt_mappable_entries
- / 1024;
-
- overhead_entries += 1; /* BIOS popup */
-
if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@ static unsigned int intel_gtt_stolen_entries(void)
}
}
- if (!local && stolen_size > intel_max_stolen) {
- dev_info(&intel_private.bridge_dev->dev,
- "detected %dK stolen memory, trimming to %dK\n",
- stolen_size / KB(1), intel_max_stolen / KB(1));
- stolen_size = intel_max_stolen;
- } else if (stolen_size > 0) {
+ if (stolen_size > 0) {
dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
stolen_size / KB(1), local ? "local" : "stolen");
} else {
@@ -637,46 +490,88 @@ static unsigned int intel_gtt_stolen_entries(void)
stolen_size = 0;
}
- stolen_entries = stolen_size/KB(4) - overhead_entries;
+ return stolen_size;
+}
- return stolen_entries;
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+ u32 pgetbl_ctl, pgetbl_ctl2;
+
+ /* ensure that ppgtt is disabled */
+ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+ pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+ writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+ /* write the new ggtt size */
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+ pgetbl_ctl |= size_flag;
+ writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
-static unsigned int intel_gtt_total_entries(void)
+static unsigned int i965_gtt_total_entries(void)
{
int size;
+ u32 pgetbl_ctl;
+ u16 gmch_ctl;
- if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctl);
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = KB(128);
- break;
- case I965_PGETBL_SIZE_256KB:
- size = KB(256);
- break;
- case I965_PGETBL_SIZE_512KB:
- size = KB(512);
- break;
- case I965_PGETBL_SIZE_1MB:
- size = KB(1024);
+ if (INTEL_GTT_GEN == 5) {
+ switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+ case G4x_GMCH_SIZE_1M:
+ case G4x_GMCH_SIZE_VT_1M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
break;
- case I965_PGETBL_SIZE_2MB:
- size = KB(2048);
+ case G4x_GMCH_SIZE_VT_1_5M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
break;
- case I965_PGETBL_SIZE_1_5MB:
- size = KB(1024 + 512);
+ case G4x_GMCH_SIZE_2M:
+ case G4x_GMCH_SIZE_VT_2M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = KB(512);
}
+ }
- return size/4;
- } else if (INTEL_GTT_GEN == 6) {
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
+ return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+ return i965_gtt_total_entries();
+ else if (INTEL_GTT_GEN == 6) {
u16 snb_gmch_ctl;
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@ static unsigned int intel_gtt_mappable_entries(void)
{
unsigned int aperture_size;
- if (INTEL_GTT_GEN == 2) {
+ if (INTEL_GTT_GEN == 1) {
+ u32 smram_miscc;
+
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
+
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+ == I810_GFX_MEM_WIN_32M)
+ aperture_size = MB(32);
+ else
+ aperture_size = MB(64);
+ } else if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@ static void intel_gtt_cleanup(void)
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
-
+
intel_gtt_teardown_scratch_page();
}
@@ -755,6 +661,14 @@ static int intel_gtt_init(void)
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+ /* save the PGETBL reg for resume */
+ intel_private.PGETBL_save =
+ readl(intel_private.registers+I810_PGETBL_CTL)
+ & ~I810_PGETBL_ENABLED;
+ /* we only ever restore the register when enabling the PGTBL... */
+ if (HAS_PGTBL_EN)
+ intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,7 @@ static int intel_gtt_init(void)
global_cache_flush(); /* FIXME: ? */
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
- if (intel_private.base.gtt_stolen_entries == 0) {
- intel_private.driver->cleanup();
- iounmap(intel_private.registers);
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
+ intel_private.base.stolen_size = intel_gtt_stolen_size();
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@@ -787,6 +694,8 @@ static int intel_gtt_init(void)
return ret;
}
+ intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
return 0;
}
@@ -862,25 +771,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
u32 pte_flags = I810_PTE_VALID;
-
- switch (flags) {
- case AGP_DCACHE_MEMORY:
- pte_flags |= I810_PTE_LOCAL;
- break;
- case AGP_USER_CACHED_MEMORY:
+
+ if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED;
- break;
- }
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static void intel_enable_gtt(void)
+static bool intel_enable_gtt(void)
{
u32 gma_addr;
- u16 gmch_ctrl;
+ u8 __iomem *reg;
- if (INTEL_GTT_GEN == 2)
+ if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
@@ -889,13 +792,38 @@ static void intel_enable_gtt(void)
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+ if (INTEL_GTT_GEN >= 6)
+ return true;
- writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
- intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, gmch_ctrl);
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: GMCH_CTRL=%x\n",
+ gmch_ctrl);
+ return false;
+ }
+ }
+
+ reg = intel_private.registers+I810_PGETBL_CTL;
+ writel(intel_private.PGETBL_save, reg);
+ if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: PGETBL=%x [expected %x]\n",
+ readl(reg), intel_private.PGETBL_save);
+ return false;
+ }
+
+ return true;
}
static int i830_setup(void)
@@ -910,8 +838,6 @@ static int i830_setup(void)
return -ENOMEM;
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_private.pte_bus_addr =
- readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
intel_i830_setup_flush();
@@ -936,12 +862,12 @@ static int intel_fake_agp_configure(void)
{
int i;
- intel_enable_gtt();
+ if (!intel_enable_gtt())
+ return -EIO;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- for (i = intel_private.base.gtt_stolen_entries;
- i < intel_private.base.gtt_total_entries; i++) {
+ for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
@@ -965,10 +891,10 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
- unsigned int sg_len,
- unsigned int pg_start,
- unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
{
struct scatterlist *sg;
unsigned int len, m;
@@ -989,27 +915,34 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
}
readl(intel_private.gtt+j-1);
}
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ struct page **pages, unsigned int flags)
+{
+ int i, j;
+
+ for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+ dma_addr_t addr = page_to_phys(pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ }
+ readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
- int i, j;
int ret = -EINVAL;
+ if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+ return i810_insert_dcache_entries(mem, pg_start, type);
+
if (mem->page_count == 0)
goto out;
- if (pg_start < intel_private.base.gtt_stolen_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
- pg_start, intel_private.base.gtt_stolen_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+ if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
goto out_err;
if (type != mem->type)
@@ -1021,21 +954,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
- ret = intel_agp_map_memory(mem);
+ if (intel_private.base.needs_dmar) {
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+ &mem->sg_list, &mem->num_sg);
if (ret != 0)
return ret;
intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
pg_start, type);
- } else {
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- dma_addr_t addr = page_to_phys(mem->pages[i]);
- intel_private.driver->write_entry(addr,
- j, type);
- }
- readl(intel_private.gtt+j-1);
- }
+ } else
+ intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
out:
ret = 0;
@@ -1044,40 +973,54 @@ out_err:
return ret;
}
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+ unsigned int i;
+
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
+ }
+ readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
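The exported intel_gtt_clear_range() above never leaves stale PTEs behind: every freed entry is redirected at the scratch page. A self-contained sketch of that pattern, with the GTT modeled as a plain array instead of the real MMIO writes (all values hypothetical):

#include <stdio.h>
#include <stdint.h>

#define GTT_ENTRIES 16
#define PTE_VALID   0x1u

static uint32_t gtt[GTT_ENTRIES];		/* stand-in for the GTT */
static const uint32_t scratch_pte = 0xdead0000u | PTE_VALID;

/* Analogous to intel_gtt_clear_range(): point a freed range back at
 * the scratch page so no entry references a stale physical page. */
static void clear_range(unsigned int first, unsigned int num)
{
	unsigned int i;

	for (i = first; i < first + num; i++)
		gtt[i] = scratch_pte;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < GTT_ENTRIES; i++)
		gtt[i] = (0x1000u * (i + 1)) | PTE_VALID; /* fake mappings */
	clear_range(4, 8);
	for (i = 0; i < GTT_ENTRIES; i++)
		printf("pte[%2u] = 0x%08x%s\n", i, (unsigned int)gtt[i],
		       gtt[i] == scratch_pte ? " (scratch)" : "");
	return 0;
}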
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
- int i;
-
if (mem->page_count == 0)
return 0;
- if (pg_start < intel_private.base.gtt_stolen_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
+ if (intel_private.base.needs_dmar) {
+ intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
}
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
- intel_agp_unmap_memory(mem);
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- intel_private.driver->write_entry(intel_private.scratch_page_dma,
- i, 0);
- }
- readl(intel_private.gtt+i-1);
+ intel_gtt_clear_range(pg_start, mem->page_count);
return 0;
}
-static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-{
- intel_private.driver->chipset_flush();
-}
-
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
int type)
{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
/* always return NULL for other allocation types for now */
@@ -1274,40 +1217,11 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
- intel_private.pte_bus_addr =
- readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
intel_i9xx_setup_flush();
return 0;
}
-static const struct agp_bridge_driver intel_810_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i810_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
- .needs_scratch_page = true,
- .configure = intel_i810_configure,
- .fetch_size = intel_i810_fetch_size,
- .cleanup = intel_i810_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_fake_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = agp_generic_create_gatt_table,
- .free_gatt_table = agp_generic_free_gatt_table,
- .insert_memory = intel_i810_insert_entries,
- .remove_memory = intel_i810_remove_entries,
- .alloc_by_type = intel_i810_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
@@ -1328,15 +1242,20 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .chipset_flush = intel_fake_agp_chipset_flush,
};
static const struct intel_gtt_driver i81x_gtt_driver = {
.gen = 1,
+ .has_pgtbl_enable = 1,
.dma_mask_size = 32,
+ .setup = i810_setup,
+ .cleanup = i810_cleanup,
+ .check_flags = i830_check_flags,
+ .write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
+ .has_pgtbl_enable = 1,
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
@@ -1346,10 +1265,11 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
};
static const struct intel_gtt_driver i915_gtt_driver = {
.gen = 3,
+ .has_pgtbl_enable = 1,
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1376,6 +1296,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
};
static const struct intel_gtt_driver i965_gtt_driver = {
.gen = 4,
+ .has_pgtbl_enable = 1,
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
@@ -1419,93 +1340,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
static const struct intel_gtt_driver_description {
unsigned int gmch_chip_id;
char *name;
- const struct agp_bridge_driver *gmch_driver;
const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
&i81x_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
- &intel_fake_agp_driver, &pineview_gtt_driver },
+ &pineview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
- &intel_fake_agp_driver, &pineview_gtt_driver },
+ &pineview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
- "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ "HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ "HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1530,21 +1450,20 @@ int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
int i, mask;
- bridge->driver = NULL;
+ intel_private.driver = NULL;
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_gtt_chipsets[i].gmch_driver;
- intel_private.driver =
+ intel_private.driver =
intel_gtt_chipsets[i].gtt_driver;
break;
}
}
- if (!bridge->driver)
+ if (!intel_private.driver)
return 0;
+ bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = pdev;
@@ -1560,8 +1479,8 @@ int intel_gmch_probe(struct pci_dev *pdev,
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask));
- if (bridge->driver == &intel_810_driver)
- return 1;
if (intel_gtt_init() != 0)
return 0;
@@ -1570,12 +1489,19 @@ int intel_gmch_probe(struct pci_dev *pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+const struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
+void intel_gtt_chipset_flush(void)
+{
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
void intel_gmch_remove(struct pci_dev *pdev)
{
if (intel_private.pcidev)
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 27370e99c66..5e2f52b3332 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@
#include "hvc_console.h"
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
static struct vio_device_id hvc_driver_table[] __devinitdata = {
{"serial", "hvterm1"},
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f2536..320668f4c3a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
{
struct die_args *args = data;
- if (val != DIE_NMI)
+ if (val != DIE_NMIUNKNOWN)
return NOTIFY_OK;
/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee23592700..ef138731c0e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@ config FSL_DMA
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
- depends on PPC_MPC512x
+ depends on PPC_MPC512x || PPC_MPC831x
select DMA_ENGINE
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf30059..59c270192cc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
#define MPC_DMA_DMAES_SBE (1 << 1)
#define MPC_DMA_DMAES_DBE (1 << 0)
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6)
+
#define MPC_DMA_TSIZE_1 0x00
#define MPC_DMA_TSIZE_2 0x01
#define MPC_DMA_TSIZE_4 0x02
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
/* 0x30 */
u32 dmahrsh; /* DMA hw request status high(ch63~32) */
u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
- u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ union {
+ u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ u32 dmagpor; /* (General purpose register on MPC8308) */
+ };
u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
/* 0x40 ~ 0xff */
u32 reserve0[48]; /* Reserved */
@@ -195,7 +201,9 @@ struct mpc_dma {
struct mpc_dma_regs __iomem *regs;
struct mpc_dma_tcd __iomem *tcd;
int irq;
+ int irq2;
uint error_status;
+ int is_mpc8308;
/* Lock for error_status field in this structure */
spinlock_t error_status_lock;
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
prev = mdesc;
}
- prev->tcd->start = 0;
prev->tcd->int_maj = 1;
/* Send first descriptor in chain into hardware */
memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+ if (first != prev)
+ mdma->tcd[cid].e_sg = 1;
out_8(&mdma->regs->dmassrt, cid);
}
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
spin_lock(&mchan->lock);
+ out_8(&mdma->regs->dmacint, ch + off);
+ out_8(&mdma->regs->dmacerr, ch + off);
+
/* Check error status */
if (es & (1 << ch))
list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
spin_unlock(&mdma->error_status_lock);
/* Handle interrupt on each channel */
- mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+ if (mdma->dma.chancnt > 32) {
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
in_be32(&mdma->regs->dmaerrh), 32);
+ }
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
in_be32(&mdma->regs->dmaerrl), 0);
- /* Ack interrupt on all channels */
- out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
/* Schedule tasklet */
tasklet_schedule(&mdma->tasklet);
return IRQ_HANDLED;
}
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* Process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
- struct mpc_dma *mdma = (void *)data;
dma_cookie_t last_cookie = 0;
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
struct dma_async_tx_descriptor *desc;
unsigned long flags;
LIST_HEAD(list);
- uint es;
int i;
+ for (i = 0; i < mdma->dma.chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!list_empty(&mchan->completed))
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (list_empty(&list))
+ continue;
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ desc = &mdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ list_splice_tail_init(&list, &mchan->free);
+ mchan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+ struct mpc_dma *mdma = (void *)data;
+ unsigned long flags;
+ uint es;
+
spin_lock_irqsave(&mdma->error_status_lock, flags);
es = mdma->error_status;
mdma->error_status = 0;
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data)
dev_err(mdma->dma.dev, "- Destination Bus Error\n");
}
- for (i = 0; i < mdma->dma.chancnt; i++) {
- mchan = &mdma->channels[i];
-
- /* Get all completed descriptors */
- spin_lock_irqsave(&mchan->lock, flags);
- if (!list_empty(&mchan->completed))
- list_splice_tail_init(&mchan->completed, &list);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- if (list_empty(&list))
- continue;
-
- /* Execute callbacks and run dependencies */
- list_for_each_entry(mdesc, &list, node) {
- desc = &mdesc->desc;
-
- if (desc->callback)
- desc->callback(desc->callback_param);
-
- last_cookie = desc->cookie;
- dma_run_dependencies(desc);
- }
-
- /* Free descriptors */
- spin_lock_irqsave(&mchan->lock, flags);
- list_splice_tail_init(&list, &mchan->free);
- mchan->completed_cookie = last_cookie;
- spin_unlock_irqrestore(&mchan->lock, flags);
- }
+ mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc = NULL;
struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
}
spin_unlock_irqrestore(&mchan->lock, iflags);
- if (!mdesc)
+ if (!mdesc) {
+ /* try to free completed descriptors */
+ mpc_dma_process_completed(mdma);
return NULL;
+ }
mdesc->error = 0;
tcd = mdesc->tcd;
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
tcd->dsize = MPC_DMA_TSIZE_32;
tcd->soff = 32;
tcd->doff = 32;
- } else if (IS_ALIGNED(src | dst | len, 16)) {
+ } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+ /* MPC8308 doesn't support 16 byte transfers */
tcd->ssize = MPC_DMA_TSIZE_16;
tcd->dsize = MPC_DMA_TSIZE_16;
tcd->soff = 16;
@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
return -EINVAL;
}
+ if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+ mdma->is_mpc8308 = 1;
+ mdma->irq2 = irq_of_parse_and_map(dn, 1);
+ if (mdma->irq2 == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+ }
+
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
return -EINVAL;
}
+ if (mdma->is_mpc8308) {
+ retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+ DRV_NAME, mdma);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ2!\n");
+ return -EINVAL;
+ }
+ }
+
spin_lock_init(&mdma->error_status_lock);
dma = &mdma->dma;
dma->dev = dev;
- dma->chancnt = MPC_DMA_CHANNELS;
+ if (!mdma->is_mpc8308)
+ dma->chancnt = MPC_DMA_CHANNELS;
+ else
+ dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
- out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
- MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
-
- /* Disable hardware DMA requests */
- out_be32(&mdma->regs->dmaerqh, 0);
- out_be32(&mdma->regs->dmaerql, 0);
-
- /* Disable error interrupts */
- out_be32(&mdma->regs->dmaeeih, 0);
- out_be32(&mdma->regs->dmaeeil, 0);
-
- /* Clear interrupts status */
- out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
- /* Route interrupts to IPIC */
- out_be32(&mdma->regs->dmaihsa, 0);
- out_be32(&mdma->regs->dmailsa, 0);
+ if (!mdma->is_mpc8308) {
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+ MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+ /* Disable hardware DMA requests */
+ out_be32(&mdma->regs->dmaerqh, 0);
+ out_be32(&mdma->regs->dmaerql, 0);
+
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeih, 0);
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Route interrupts to IPIC */
+ out_be32(&mdma->regs->dmaihsa, 0);
+ out_be32(&mdma->regs->dmailsa, 0);
+ } else {
+ /* MPC8308 has 16 channels and lacks some registers */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+ /* enable snooping */
+ out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmaintl, 0xFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+ }
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
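The memcpy path above picks the widest transfer size that the combined alignment of src, dst and len allows, skipping the 16-byte case on MPC8308. A standalone sketch of that selection policy; the 4/2/1-byte fallbacks are assumed to mirror the driver's remaining branches, which this hunk does not show:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Widest transfer size permitted by the alignment of src, dst and len;
 * MPC8308 cannot do 16-byte transfers, mirroring the check added in
 * mpc_dma_prep_memcpy(). */
static unsigned int pick_tsize(uintptr_t src, uintptr_t dst, size_t len,
			       bool is_mpc8308)
{
	uintptr_t bits = src | dst | len;

	if (IS_ALIGNED(bits, 32))
		return 32;
	if (!is_mpc8308 && IS_ALIGNED(bits, 16))
		return 16;
	if (IS_ALIGNED(bits, 4))
		return 4;
	if (IS_ALIGNED(bits, 2))
		return 2;
	return 1;
}

int main(void)
{
	printf("%u\n", pick_tsize(0x1000, 0x2000, 4096, false)); /* 32 */
	printf("%u\n", pick_tsize(0x1010, 0x2020, 4080, false)); /* 16 */
	printf("%u\n", pick_tsize(0x1010, 0x2020, 4080, true));  /* 4  */
	return 0;
}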
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73..0cb2ba50af5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a004d..952b3d4fb2a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!crtc->enabled)
return true;
+ saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
/* XXX free adjusted_mode */
drm_mode_destroy(dev, adjusted_mode);
/* FIXME: add subpixel order */
done:
if (!ret) {
+ crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
old_fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
+ set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
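The error path added above rolls the crtc back to the framebuffer it had before the failed modeset. A minimal sketch of the same save/try/restore pattern with hypothetical types, not the DRM API:

#include <stdio.h>
#include <stdbool.h>

struct mode { int w, h; };
struct crtc { struct mode mode, hwmode; int x, y; };

/* Try a modeset; on failure roll back every piece of saved state, the
 * approach drm_crtc_helper_set_mode() takes with saved_hwmode above. */
static bool set_mode(struct crtc *c, struct mode m, int x, int y)
{
	struct mode saved_mode = c->mode, saved_hwmode = c->hwmode;
	int saved_x = c->x, saved_y = c->y;
	bool ok;

	c->mode = m;
	c->hwmode = m;	/* stand-in for the adjusted mode */
	c->x = x;
	c->y = y;

	ok = (m.w > 0 && m.h > 0);	/* stand-in for the hardware call */
	if (!ok) {
		c->mode = saved_mode;
		c->hwmode = saved_hwmode;
		c->x = saved_x;
		c->y = saved_y;
	}
	return ok;
}

int main(void)
{
	struct crtc c = { {1024, 768}, {1024, 768}, 0, 0 };

	printf("%d\n", set_mode(&c, (struct mode){1920, 1080}, 0, 0));
	printf("%d\n", set_mode(&c, (struct mode){0, 0}, 0, 0));
	printf("mode %dx%d\n", c.mode.w, c.mode.h);	/* still 1920x1080 */
	return 0;
}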
@@ -664,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
- if (ret != 0)
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
goto fail;
+ }
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4ea4d..0307d601f5e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
+void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
+{
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.type_aux = 0;
+
+ info->fix.line_length = fb->pitch;
+ return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+ drm_fb_helper_fill_fix(info, fb_helper->fb);
}
mutex_unlock(&dev->mode_config.mutex);
@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb) {
info->var.pixclock = 0;
+ drm_fb_helper_fill_fix(info, fb_helper->fb);
if (register_framebuffer(info) < 0) {
return -EINVAL;
}
@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
-{
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->fix.type_aux = 0;
-
- info->fix.line_length = pitch;
- return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+static int __init drm_fb_helper_modinit(void)
+{
+ const char *name = "fbcon";
+ struct module *fbcon;
+
+ mutex_lock(&module_mutex);
+ fbcon = find_module(name);
+ mutex_unlock(&module_mutex);
+
+ if (!fbcon)
+ request_module_nowait(name);
+ return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794bac04..2ec7d48fc4a 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155edad..0054e957203 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
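A short sketch of the slot arithmetic behind the vblanktimestamp() macro above; DRM_VBLANKTIME_RBSIZE is assumed to be 2 here, since its definition is not part of this patch:

#include <stdio.h>

#define DRM_VBLANKTIME_RBSIZE 2	/* assumed; defined elsewhere in drmP.h */

/* Same indexing as vblanktimestamp(): a per-crtc base plus the vblank
 * count folded onto the small ring with a modulo. */
static unsigned int slot(unsigned int crtc, unsigned int count)
{
	return crtc * DRM_VBLANKTIME_RBSIZE + (count % DRM_VBLANKTIME_RBSIZE);
}

int main(void)
{
	unsigned int count;

	for (count = 0; count < 5; count++)
		printf("crtc 1, vblank count %u -> slot %u\n",
		       count, slot(1, count));
	return 0;
}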
/**
* Get interrupt from bus id.
*
@@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ * Disable preemption, so vblank_time_lock is held as short as
+ * possible, even under a kernel with PREEMPT_RT patches.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ atomic_inc(&dev->_vblank_count[crtc]);
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ preempt_enable();
+}
+
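The 1 msec test above decides whether a new vblank interval started after the last recorded timestamp but before its irq could run. A minimal standalone sketch of that decision; the nanosecond values are made up:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the diff_ns check in vblank_disable_and_save(): bump the
 * counter only when a high precision timestamp is available (vblrc > 0)
 * and the fresh timestamp is more than ~1 msec past the stored one. */
static int needs_count_bump(int64_t tvblank_ns, int64_t stored_ns, int vblrc)
{
	int64_t diff_ns = tvblank_ns - stored_ns;

	if (diff_ns < 0)
		diff_ns = -diff_ns;
	return vblrc > 0 && diff_ns > 1000000;
}

int main(void)
{
	printf("%d\n", needs_count_bump(16666667, 0, 1));	/* 1: new vblank */
	printf("%d\n", needs_count_bump(500000, 0, 1));		/* 0: same vblank */
	printf("%d\n", needs_count_bump(16666667, 0, 0));	/* 0: no precise ts */
	return 0;
}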
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg)
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
+ vblank_disable_and_save(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
kfree(dev->last_vblank);
kfree(dev->last_vblank_wait);
kfree(dev->vblank_inmodeset);
+ kfree(dev->_vblank_time);
dev->num_crtcs = 0;
}
@@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->vblank_time_lock);
+
dev->num_crtcs = num_crtcs;
dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank_inmodeset)
goto err;
+ dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+ sizeof(struct timeval), GFP_KERNEL);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install);
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
int irq_enabled, i;
@@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data,
{
struct drm_control *ctl = data;
- /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+ /* if we don't have an irq we fall back for compatibility reasons -
+ * this used to be a separate function in drm_dma.h
+ */
switch (ctl->func) {
@@ -360,6 +472,287 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g, by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+ linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+ 1000000000), dotclock);
+ framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
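
To make the duration math above concrete, here is a minimal userspace sketch
applying the same formulas to a hypothetical 1080p mode; the 148.5 MHz dot
clock, htotal 2200 and vtotal 1125 are assumptions for illustration, not
values taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dotclock = 148500ULL * 1000;            /* hwmode.clock is in kHz */
        int64_t pixeldur_ns = 1000000000ULL / dotclock;              /* ~6 ns */
        int64_t linedur_ns  = (2200ULL * 1000000000ULL) / dotclock;  /* ~14814 ns */
        int64_t framedur_ns = 1125 * linedur_ns;                     /* ~16.67 ms */

        printf("pixel %lld ns, line %lld ns, frame %lld ns\n",
               (long long)pixeldur_ns, (long long)linedur_ns,
               (long long)framedur_ns);
        return 0;
    }
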
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If the mode timing is undefined, just return as a no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Don't know yet how to handle interlaced or
+ * double scan modes. Just no-op for now.
+ */
+ if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+ return -ENOTSUPP;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on PREEMPT_RT kernel.
+ */
+ preempt_disable();
+
+ /* Get system timestamp before query. */
+ do_gettimeofday(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ do_gettimeofday(&raw_time);
+
+ preempt_enable();
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+ crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+ raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+ (int) duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
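
A kms driver wires this helper into the new dev->driver->get_vblank_timestamp
hook. A minimal sketch, assuming a hypothetical foo_crtc_from_pipe() lookup
from pipe index to the drm_crtc carrying the true scanout timing; only the
drm_calc_vbltimestamp_from_scanoutpos() call itself is taken from this patch:

    static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
                                        int *max_error,
                                        struct timeval *vblank_time,
                                        unsigned flags)
    {
        /* foo_crtc_from_pipe() is a placeholder for the driver's own
         * pipe -> drm_crtc lookup. */
        struct drm_crtc *refcrtc = foo_crtc_from_pipe(dev, crtc);

        if (!refcrtc)
            return -EINVAL;

        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                     vblank_time, flags,
                                                     refcrtc);
    }
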
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on the specified crtc. May call into the kms driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return gettimeofday timestamp as best estimate.
+ */
+ do_gettimeofday(tvblank);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
@@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current value vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ smp_rmb();
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
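
The vblanktimestamp() accessor used above is defined in the drmP.h part of
this change, which is not shown in this excerpt. A definition consistent with
the num_crtcs * DRM_VBLANKTIME_RBSIZE allocation in drm_vblank_init() would
look roughly like this (assumed shape, not quoted from the header):

    /* Assumed shape only: one small per-crtc ring of timestamps,
     * indexed by vblank count modulo the ring size. */
    #define vblanktimestamp(dev, crtc, count) \
        ((dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
                             ((count) % DRM_VBLANKTIME_RBSIZE)])

The smp_rmb() in the reader pairs with the smp_wmb() the writers below issue
between storing a timestamp slot and incrementing _vblank_count, which is
what makes the retry loop behave like a seqlock.
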
+
+/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
@@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ smp_wmb();
+ }
+
atomic_add(diff, &dev->_vblank_count[crtc]);
}
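
The wrap handling relies on unsigned arithmetic. A standalone check of the
same computation, treating max_vblank_count as the hardware counter's wrap
modulus (the numbers here are made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t max_vblank_count = 0x1000;  /* hypothetical counter modulus */
        uint32_t last = 10, cur = 3;         /* counter wrapped between reads */
        uint32_t diff = cur - last;          /* underflows in u32, by design */

        if (cur < last)
            diff += max_vblank_count;
        assert(diff == 4089);                /* (3 - 10) mod 0x1000 */
        return 0;
    }
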
@@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
- unsigned long irqflags;
+ unsigned long irqflags, irqflags2;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ /* Disable preemption while holding vblank_time_lock. Do
+	 * it explicitly to guard against the PREEMPT_RT kernel.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+	 * timestamps. Filter code in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
else {
@@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
drm_update_vblank_count(dev, crtc);
}
}
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ preempt_enable();
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get);
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank_disable_timer,
+ jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
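
Note that with the default drm_vblank_offdelay of 5000 msecs introduced
below, jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000) evaluates to
jiffies + 5*DRM_HZ, i.e. exactly the delay the removed hardcoded line used,
so the default behaviour is unchanged.
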
@@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->driver->disable_vblank(dev, crtc);
+ vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
- do_gettimeofday(&now);
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
}
file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1 << 23)) {
vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ vblwait->reply.sequence = seq;
trace_drm_vblank_event_delivered(current->pid, pipe,
vblwait->request.sequence);
} else {
list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (ret != -EINTR) {
struct timeval now;
- do_gettimeofday(&now);
-
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
@@ -750,8 +1214,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
unsigned long flags;
unsigned int seq;
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -789,11 +1252,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
if (!dev->num_crtcs)
return;
- atomic_inc(&dev->_vblank_count[crtc]);
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+	 * identical timestamps. This happens if this vblank has
+	 * already been processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+ smp_wmb();
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ atomic_inc(&dev->_vblank_count[crtc]);
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
EXPORT_SYMBOL(drm_handle_vblank);
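
drm_handle_vblank() is the driver-facing entry point for this whole path. A
minimal sketch of how an irq handler might feed it; the register helper and
bit macro are placeholders, not from any real driver:

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
        struct drm_device *dev = arg;
        u32 status = foo_read_irq_status(dev);       /* hypothetical */
        int pipe;

        for (pipe = 0; pipe < dev->num_crtcs; pipe++)
            if (status & FOO_PIPE_VBLANK(pipe))      /* hypothetical */
                drm_handle_vblank(dev, pipe);

        return IRQ_HANDLED;
    }
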
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed9..c59515ba7e6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
+ * Initialize lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
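
A sketch of the intended usage for a range-restricted eviction scan, modeled
on the GEM eviction pattern; the inactive list and node type are
placeholders, only the drm_mm_* calls match this patch:

    drm_mm_init_scan_with_range(mm, size, alignment, start, end);

    list_for_each_entry(entry, &inactive_list, list) {
        /* Returns nonzero once the blocks scanned so far would free a
         * suitable hole inside [start, end). */
        if (drm_mm_scan_add_block(&entry->node))
            goto found;
    }
    /* Whether or not a hole was found, every block added above must be
     * taken off the scan list again with drm_mm_scan_remove_block()
     * before any other drm_mm operation, per the warning above. */
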
+
+/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
+ unsigned long adj_start;
+ unsigned long adj_end;
mm->scanned_blocks++;
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_hole(node->start, node->start + node->size,
+ if (mm->scan_check_range) {
+ adj_start = node->start < mm->scan_start ?
+ mm->scan_start : node->start;
+ adj_end = node->start + node->size > mm->scan_end ?
+ mm->scan_end : node->start + node->size;
+ } else {
+ adj_start = node->start;
+ adj_end = node->start + node->size;
+ }
+
+	if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee042c..d59edc18301 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
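
Both new knobs are writable at runtime by root (mode 0600) under
/sys/module/drm/parameters/. Setting vblankoffdelay to 0 keeps vblank irqs
permanently enabled once requested, since drm_vblank_put() above only arms
the disable timer when drm_vblank_offdelay > 0; setting
timestamp_precision_usec to 0 disables the high-precision driver query in
drm_get_last_vbltimestamp(), because its max_error > 0 check then fails and
it falls back to do_gettimeofday().
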
struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7..0ae6a7c5020 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
+ i915_gem_execbuffer.o \
+ i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c..92f75782c33 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(is_broadwater);
B(is_crestline);
B(has_fbc);
- B(has_rc6);
B(has_pipe_cxsr);
B(has_hotplug);
B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj_priv->user_pin_count > 0)
+ if (obj->user_pin_count > 0)
return "P";
- else if (obj_priv->pin_count > 0)
+ else if (obj->pin_count > 0)
return "p";
else
return " ";
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
+ obj->last_fenced_seqno,
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ seq_printf(m, " (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj_priv, head, mm_list) {
+ list_for_each_entry(obj, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj_priv);
+ describe_obj(m, obj);
seq_printf(m, "\n");
- total_obj_size += obj_priv->base.size;
- total_gtt_size += obj_priv->gtt_space->size;
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
- seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
- seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
- seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
- seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
- seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
- seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
mutex_unlock(&dev->struct_mutex);
@@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "%d prepares\n", work->pending);
if (work->old_fb_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
- if(obj_priv)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+ if (obj)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
- if(obj_priv)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->pending_flip_obj;
+ if (obj)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
- int ret;
+ int ret, count;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ count = 0;
+ if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+ seq_printf(m, "Render requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[RCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+ seq_printf(m, "BSD requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[VCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+ seq_printf(m, "BLT requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[BCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
}
mutex_unlock(&dev->struct_mutex);
+ if (count == 0)
+ seq_printf(m, "No requests\n");
+
return 0;
}
+static void i915_ring_seqno_info(struct seq_file *m,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ seq_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring));
+ seq_printf(m, "Waiter sequence (%s): %d\n",
+ ring->name, ring->waiting_seqno);
+ seq_printf(m, "IRQ sequence (%s): %d\n",
+ ring->name, ring->irq_seqno);
+ }
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
@@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- if (obj == NULL) {
- seq_printf(m, "Fenced object[%2d] = unused\n", i);
- } else {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(obj);
- seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- seq_printf(m, "\n");
- }
+ seq_printf(m, "Fenced object[%2d] = ", i);
+ if (obj == NULL)
+ seq_printf(m, "unused");
+ else
+ describe_obj(m, obj);
+ seq_printf(m, "\n");
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ struct intel_ring_buffer *ring;
volatile u32 *hws;
+ int i;
- hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = (volatile u32 *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
static void i915_dump_object(struct seq_file *m,
struct io_mapping *mapping,
- struct drm_i915_gem_object *obj_priv)
+ struct drm_i915_gem_object *obj)
{
int page, page_count, i;
- page_count = obj_priv->base.size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
- obj_priv->gtt_offset + page * PAGE_SIZE);
+ obj->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- obj = &obj_priv->base;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n",
- obj_priv->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}
mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (!dev_priv->render_ring.gem_object) {
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = dev_priv->render_ring.virtual_start;
+ u8 *virt = ring->virtual_start;
uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ for (off = 0; off < ring->size; off += 4) {
uint32_t *ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail;
+ struct intel_ring_buffer *ring;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (ring->size == 0)
+ return 0;
- seq_printf(m, "RingHead : %08x\n", head);
- seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Ring %s:\n", ring->name);
+ seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+ seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+ seq_printf(m, " Size : %08x\n", ring->size);
+ seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+ seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+ if (IS_GEN6(dev)) {
+ seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+ seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+ }
+ seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+ seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
return 0;
}
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RING_RENDER: return " render";
+ case RING_BSD: return " bsd";
+ case RING_BLT: return " blt";
+ default: return "";
+ }
+}
+
static const char *pin_flag(int pinned)
{
if (pinned > 0)
@@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
+static void print_error_buffers(struct seq_file *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ seq_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->seqno,
+ pin_flag(err->pinned),
+ tiling_flag(err->tiling),
+ dirty_flag(err->dirty),
+ purgeable_flag(err->purgeable),
+ ring_str(err->ring));
+
+ if (err->name)
+ seq_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", err->fence_reg);
+
+ seq_printf(m, "\n");
+ err++;
+ }
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, "ERROR: 0x%08x\n", error->error);
+ seq_printf(m, "Blitter command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+ seq_printf(m, "Video (BSD) command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+ }
+ seq_printf(m, "Render command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
}
- seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
- if (error->active_bo_count) {
- seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
- for (i = 0; i < error->active_bo_count; i++) {
- seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
- error->active_bo[i].gtt_offset,
- error->active_bo[i].size,
- error->active_bo[i].read_domains,
- error->active_bo[i].write_domain,
- error->active_bo[i].seqno,
- pin_flag(error->active_bo[i].pinned),
- tiling_flag(error->active_bo[i].tiling),
- dirty_flag(error->active_bo[i].dirty),
- purgeable_flag(error->active_bo[i].purgeable));
-
- if (error->active_bo[i].name)
- seq_printf(m, " (name: %d)", error->active_bo[i].name);
- if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
- seq_printf(m, "\n");
- }
- }
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+ for (i = 0; i < 16; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
if (error->batchbuffer[i]) {
@@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_GEN6(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ __gen6_force_wake_get(dev_priv);
+
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = rp_state_cap & 0xff;
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * 100);
+
+ __gen6_force_wake_put(dev_priv);
+ } else {
+ seq_printf(m, "no P-state info available\n");
+ }
return 0;
}
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_GEN5(dev))
+ if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
}
DRM_INFO("Manually setting wedged to %d\n", val);
-
- atomic_set(&dev_priv->mm.wedged, val);
- if (val) {
- wake_up_all(&dev_priv->irq_queue);
- queue_work(dev_priv->wq, &dev_priv->error_work);
- }
+ i915_handle_error(dev, val);
return cnt;
}
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+ {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+ {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc83d9..0568dbdc10e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->render_ring.status_page.gfx_addr) {
- dev_priv->render_ring.status_page.gfx_addr = 0;
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -98,7 +101,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -124,6 +127,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -132,9 +137,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -158,24 +163,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->render_ring.gem_object != NULL) {
+ if (ring->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->render_ring.size = init->ring_size;
+ ring->size = init->ring_size;
- dev_priv->render_ring.map.offset = init->ring_start;
- dev_priv->render_ring.map.size = init->ring_size;
- dev_priv->render_ring.map.type = 0;
- dev_priv->render_ring.map.flags = 0;
- dev_priv->render_ring.map.mtrr = 0;
+ ring->map.offset = init->ring_start;
+ ring->map.size = init->ring_size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+ drm_core_ioremap_wc(&ring->map, dev);
- if (dev_priv->render_ring.map.handle == NULL) {
+ if (ring->map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -183,7 +188,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+ ring->virtual_start = ring->map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -202,12 +207,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- ring = &dev_priv->render_ring;
-
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -222,7 +225,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -264,7 +267,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
@@ -322,40 +325,27 @@ static int do_validate_cmd(int cmd)
return 0;
}
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
- if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- cmd = buffer[i];
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- OUT_RING(buffer[i]);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
@@ -366,34 +356,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4)
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
{
- struct drm_clip_rect box = boxes[i];
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
+ box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
return 0;
}
@@ -413,12 +410,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
}
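The breadcrumb is an MI_STORE_DWORD_INDEX that writes the software counter into the hardware status page; waiters later compare the dword at I915_BREADCRUMB_INDEX against the counter. A sketch of the read side, assuming hw_status_page is the CPU mapping of the page targeted above (the parameter name is an assumption, not taken from this patch):

static u32 sketch_read_breadcrumb(volatile u32 *hw_status_page)
{
	/* slot written by MI_STORE_DWORD_INDEX in i915_emit_breadcrumb() */
	return hw_status_page[I915_BREADCRUMB_INDEX];
}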
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +438,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cliprects, i,
+ ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
@@ -459,8 +457,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
- int i = 0, count;
+ int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -470,17 +469,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
-
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- batch->DR1, batch->DR4);
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
@@ -488,26 +489,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
}
if (IS_G4X(dev) || IS_GEN5(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
}
- i915_emit_breadcrumb(dev);
+ i915_emit_breadcrumb(dev);
return 0;
}
@@ -516,6 +520,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
+ int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -527,12 +532,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(6);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->current_page == 0) {
@@ -543,33 +549,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->current_page = 0;
}
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
+
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
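The three reservations that i915_dispatch_flip used to make are merged into one fallible BEGIN_LP_RING(10), so the flip sequence either gets all of its ring space or fails cleanly before emitting anything; only the best-effort breadcrumb at the end keeps its own guarded reservation. The dword budget, reconstructed from the removed BEGIN_LP_RING calls:

	/* 2 (MI_FLUSH pair) + 6 (display buffer info) + 2 (wait for flip) */
	ret = BEGIN_LP_RING(2 + 6 + 2);
	if (ret)
		return ret;	/* nothing emitted yet, nothing to unwind */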
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
- return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
- dev_priv->render_ring.size - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +773,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
case I915_PARAM_HAS_COHERENT_RINGS:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -826,7 +837,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -1005,73 +1016,47 @@ intel_teardown_mchbar(struct drm_device *dev)
#define PTE_VALID (1 << 0)
/**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
* @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
*
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
*/
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
- unsigned long *gtt;
- unsigned long entry, phys;
- int gtt_bar = IS_GEN2(dev) ? 1 : 0;
- int gtt_offset, gtt_size;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
- gtt_offset = 2*1024*1024;
- gtt_size = 2*1024*1024;
- } else {
- gtt_offset = 512*1024;
- gtt_size = 512*1024;
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
} else {
- gtt_bar = 3;
- gtt_offset = 0;
- gtt_size = pci_resource_len(dev->pdev, gtt_bar);
- }
-
- gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
- gtt_size);
- if (!gtt) {
- DRM_ERROR("ioremap of GTT failed\n");
- return 0;
- }
-
- entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
- /* Mask out these reserved bits on this hardware. */
- if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
- entry &= ~PTE_ADDRESS_MASK_HIGH;
-
- /* If it's not a mapping type we know, then bail. */
- if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
- (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
- iounmap(gtt);
- return 0;
- }
-
- if (!(entry & PTE_VALID)) {
- DRM_ERROR("bad GTT entry in stolen space\n");
- iounmap(gtt);
- return 0;
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
}
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
- iounmap(gtt);
-
- phys =(entry & PTE_ADDRESS_MASK) |
- ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
- return phys;
+ return base + offset;
}
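The live branch computes the stolen base from PCI config space instead of walking the GTT: on gen4+ and G33 the word at 0xb0 carries the base in 1 MiB units in its upper 12 bits, while older parts keep it in 128 MiB units in the upper 5 bits of the byte at 0x9c. A worked example of the gen4+ arithmetic, with an illustrative register value:

	/* If the config word at 0xb0 reads 0x07f0: */
	u16 val = 0x07f0;
	u32 base = (u32)(val >> 4) << 20;	/* 0x7f << 20 = 0x07f00000, i.e. 127 MiB */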
static void i915_warn_stolen(struct drm_device *dev)
@@ -1087,54 +1072,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
- /* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
- if (!compressed_fb) {
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
- return;
- }
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- return;
- }
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
- cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
- if (!cfb_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- }
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
- if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
- 4096, 0);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
- if (!ll_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- drm_mm_put_block(compressed_llb);
- }
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
}
dev_priv->cfb_size = size;
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1110,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
- ll_base, size >> 20);
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
}
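The error handling above collapses into the kernel's standard goto-unwind ladder: resources are acquired in order, each failure jumps to a label that releases everything acquired so far in reverse order, and all paths funnel into the shared FBC_STOLEN_TOO_SMALL diagnosis. The bare shape of the idiom, as a self-contained sketch (assumes <linux/slab.h>):

static int sketch_acquire_two(void)
{
	void *a, *b;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		goto err;
	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		goto err_a;
	/* success: both resources stay held, as with compressed_fb/llb above */
	return 0;

err_a:
	kfree(a);
err:
	return -ENOMEM;
}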
static void i915_cleanup_compression(struct drm_device *dev)
@@ -1176,12 +1151,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "i915: switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "i915: switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1196,17 +1175,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_size,
- unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size, gtt_size, mappable_size;
int ret = 0;
- /* Basic memrange allocator for stolen space (aka mm.vram) */
- drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+ prealloc_size = dev_priv->mm.gtt->stolen_size;
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- /* Let GEM Manage from end of prealloc space to end of aperture.
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
@@ -1215,7 +1197,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
- i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
@@ -1227,16 +1209,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
- /* Try to get an 8M buffer... */
- if (prealloc_size > (9*1024*1024))
- cfb_size = 8*1024*1024;
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
- /* Allow hardware batchbuffers unless told otherwise.
- */
+ /* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
ret = intel_parse_bios(dev);
@@ -1252,6 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
+ NULL,
i915_switcheroo_can_switch);
if (ret)
goto cleanup_vga_client;
@@ -1426,152 +1410,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
}
-struct v_table {
- u8 vid;
- unsigned long vd; /* in .1 mil */
- unsigned long vm; /* in .1 mil */
- u8 pvid;
-};
-
-static struct v_table v_table[] = {
- { 0, 16125, 15000, 0x7f, },
- { 1, 16000, 14875, 0x7e, },
- { 2, 15875, 14750, 0x7d, },
- { 3, 15750, 14625, 0x7c, },
- { 4, 15625, 14500, 0x7b, },
- { 5, 15500, 14375, 0x7a, },
- { 6, 15375, 14250, 0x79, },
- { 7, 15250, 14125, 0x78, },
- { 8, 15125, 14000, 0x77, },
- { 9, 15000, 13875, 0x76, },
- { 10, 14875, 13750, 0x75, },
- { 11, 14750, 13625, 0x74, },
- { 12, 14625, 13500, 0x73, },
- { 13, 14500, 13375, 0x72, },
- { 14, 14375, 13250, 0x71, },
- { 15, 14250, 13125, 0x70, },
- { 16, 14125, 13000, 0x6f, },
- { 17, 14000, 12875, 0x6e, },
- { 18, 13875, 12750, 0x6d, },
- { 19, 13750, 12625, 0x6c, },
- { 20, 13625, 12500, 0x6b, },
- { 21, 13500, 12375, 0x6a, },
- { 22, 13375, 12250, 0x69, },
- { 23, 13250, 12125, 0x68, },
- { 24, 13125, 12000, 0x67, },
- { 25, 13000, 11875, 0x66, },
- { 26, 12875, 11750, 0x65, },
- { 27, 12750, 11625, 0x64, },
- { 28, 12625, 11500, 0x63, },
- { 29, 12500, 11375, 0x62, },
- { 30, 12375, 11250, 0x61, },
- { 31, 12250, 11125, 0x60, },
- { 32, 12125, 11000, 0x5f, },
- { 33, 12000, 10875, 0x5e, },
- { 34, 11875, 10750, 0x5d, },
- { 35, 11750, 10625, 0x5c, },
- { 36, 11625, 10500, 0x5b, },
- { 37, 11500, 10375, 0x5a, },
- { 38, 11375, 10250, 0x59, },
- { 39, 11250, 10125, 0x58, },
- { 40, 11125, 10000, 0x57, },
- { 41, 11000, 9875, 0x56, },
- { 42, 10875, 9750, 0x55, },
- { 43, 10750, 9625, 0x54, },
- { 44, 10625, 9500, 0x53, },
- { 45, 10500, 9375, 0x52, },
- { 46, 10375, 9250, 0x51, },
- { 47, 10250, 9125, 0x50, },
- { 48, 10125, 9000, 0x4f, },
- { 49, 10000, 8875, 0x4e, },
- { 50, 9875, 8750, 0x4d, },
- { 51, 9750, 8625, 0x4c, },
- { 52, 9625, 8500, 0x4b, },
- { 53, 9500, 8375, 0x4a, },
- { 54, 9375, 8250, 0x49, },
- { 55, 9250, 8125, 0x48, },
- { 56, 9125, 8000, 0x47, },
- { 57, 9000, 7875, 0x46, },
- { 58, 8875, 7750, 0x45, },
- { 59, 8750, 7625, 0x44, },
- { 60, 8625, 7500, 0x43, },
- { 61, 8500, 7375, 0x42, },
- { 62, 8375, 7250, 0x41, },
- { 63, 8250, 7125, 0x40, },
- { 64, 8125, 7000, 0x3f, },
- { 65, 8000, 6875, 0x3e, },
- { 66, 7875, 6750, 0x3d, },
- { 67, 7750, 6625, 0x3c, },
- { 68, 7625, 6500, 0x3b, },
- { 69, 7500, 6375, 0x3a, },
- { 70, 7375, 6250, 0x39, },
- { 71, 7250, 6125, 0x38, },
- { 72, 7125, 6000, 0x37, },
- { 73, 7000, 5875, 0x36, },
- { 74, 6875, 5750, 0x35, },
- { 75, 6750, 5625, 0x34, },
- { 76, 6625, 5500, 0x33, },
- { 77, 6500, 5375, 0x32, },
- { 78, 6375, 5250, 0x31, },
- { 79, 6250, 5125, 0x30, },
- { 80, 6125, 5000, 0x2f, },
- { 81, 6000, 4875, 0x2e, },
- { 82, 5875, 4750, 0x2d, },
- { 83, 5750, 4625, 0x2c, },
- { 84, 5625, 4500, 0x2b, },
- { 85, 5500, 4375, 0x2a, },
- { 86, 5375, 4250, 0x29, },
- { 87, 5250, 4125, 0x28, },
- { 88, 5125, 4000, 0x27, },
- { 89, 5000, 3875, 0x26, },
- { 90, 4875, 3750, 0x25, },
- { 91, 4750, 3625, 0x24, },
- { 92, 4625, 3500, 0x23, },
- { 93, 4500, 3375, 0x22, },
- { 94, 4375, 3250, 0x21, },
- { 95, 4250, 3125, 0x20, },
- { 96, 4125, 3000, 0x1f, },
- { 97, 4125, 3000, 0x1e, },
- { 98, 4125, 3000, 0x1d, },
- { 99, 4125, 3000, 0x1c, },
- { 100, 4125, 3000, 0x1b, },
- { 101, 4125, 3000, 0x1a, },
- { 102, 4125, 3000, 0x19, },
- { 103, 4125, 3000, 0x18, },
- { 104, 4125, 3000, 0x17, },
- { 105, 4125, 3000, 0x16, },
- { 106, 4125, 3000, 0x15, },
- { 107, 4125, 3000, 0x14, },
- { 108, 4125, 3000, 0x13, },
- { 109, 4125, 3000, 0x12, },
- { 110, 4125, 3000, 0x11, },
- { 111, 4125, 3000, 0x10, },
- { 112, 4125, 3000, 0x0f, },
- { 113, 4125, 3000, 0x0e, },
- { 114, 4125, 3000, 0x0d, },
- { 115, 4125, 3000, 0x0c, },
- { 116, 4125, 3000, 0x0b, },
- { 117, 4125, 3000, 0x0a, },
- { 118, 4125, 3000, 0x09, },
- { 119, 4125, 3000, 0x08, },
- { 120, 1125, 0, 0x07, },
- { 121, 1000, 0, 0x06, },
- { 122, 875, 0, 0x05, },
- { 123, 750, 0, 0x04, },
- { 124, 625, 0, 0x03, },
- { 125, 500, 0, 0x02, },
- { 126, 375, 0, 0x01, },
- { 127, 0, 0, 0x00, },
-};
-
-struct cparams {
- int i;
- int t;
- int m;
- int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
@@ -1637,21 +1481,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- unsigned long val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(v_table); i++) {
- if (v_table[i].pvid == pxvid) {
- if (IS_MOBILE(dev_priv->dev))
- val = v_table[i].vm;
- else
- val = v_table[i].vd;
- }
- }
-
- return val;
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
}
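This rework inverts the old 128-entry v_table: rather than linearly scanning for a matching pvid, the entries are reordered so that the PXVID value indexes the table directly, and the vid/pvid columns become redundant. The lookup drops from O(n) to O(1):

	/* old: for (i = 0; i < ARRAY_SIZE(v_table); i++)
	 *              if (v_table[i].pvid == pxvid) val = ...;
	 * new: direct index; PXVID is a 7-bit value, matching the 128 entries.
	 */
	u16 vd = v_table[pxvid & 0x7f].vd;	/* mask is defensive, sketch only */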
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1905,9 +1873,9 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
- resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1891,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
- /* Add register map (needed for suspend/resume) */
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- base = pci_resource_start(dev->pdev, mmio_bar);
- size = pci_resource_len(dev->pdev, mmio_bar);
-
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1937,16 +1900,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
- dev_priv->regs = ioremap(base, size);
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
+ io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
@@ -1958,24 +1930,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* MTRR if present. Even if a UC MTRR isn't present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024,
+ agp_size,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_iomapfree;
- }
-
- prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1944,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* bo.
*
* It is also used for periodic low-priority events, such as
- * idle-timers and hangcheck.
+ * idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
@@ -2001,20 +1962,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
- if (prealloc_size > agp_size * 3 / 4) {
- DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
- "memory stolen.\n",
- prealloc_size / 1024, agp_size / 1024);
- DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
- "updating the BIOS to fix).\n");
- dev_priv->has_gem = 0;
- }
-
if (dev_priv->has_gem == 0 &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_workqueue_free;
}
dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2037,8 +1989,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
- if (ret != 0)
- goto out_workqueue_free;
+ if (ret)
+ goto out_gem_unload;
}
if (IS_PINEVIEW(dev))
@@ -2060,16 +2012,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->user_irq_lock);
+ spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
- }
+ if (ret)
+ goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
@@ -2077,10 +2026,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_workqueue_free;
+ goto out_gem_unload;
}
}
@@ -2100,12 +2049,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
+out_gem_unload:
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
out_workqueue_free:
destroy_workqueue(dev_priv->wq);
out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -2122,6 +2077,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
@@ -2179,7 +2137,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.vram);
+ drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2188,7 +2146,7 @@ int i915_driver_unload(struct drm_device *dev)
}
if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e..87249333198 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
}
}
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+ udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+}
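The get/put pair brackets GT register access on Sandy Bridge: wait for any previous wake to drain, assert FORCEWAKE, then poll FORCEWAKE_ACK before touching the registers; put simply deasserts. Usage mirrors the i915_safe_read() helper added to i915_drv.h later in this patch:

	u32 val;

	__gen6_force_wake_get(dev_priv);	/* GT guaranteed awake from here */
	val = I915_READ(reg);
	__gen6_force_wake_put(dev_priv);	/* allow power-down again */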
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_kms_helper_poll_disable(dev);
+
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
- drm_kms_helper_poll_disable(dev);
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
error = i915_drm_freeze(dev);
if (error)
@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
{
int ret;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+ return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
- mutex_lock(&dev->struct_mutex);
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
i915_gem_reset(dev);
@@ -439,6 +484,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
@@ -472,9 +520,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
- ring->init(dev, ring);
+
+ dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+ if (HAS_BSD(dev))
+ dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+ if (HAS_BLT(dev))
+ dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
return -ENODEV;
}
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
error = i915_drm_freeze(drm_dev);
if (error)
return error;
@@ -606,6 +662,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
+ .get_vblank_timestamp = i915_get_vblank_timestamp,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +719,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
- i915_gem_shrinker_init();
-
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -684,17 +740,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET)) {
- driver.suspend = i915_suspend;
- driver.resume = i915_resume;
- }
-
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
- i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da309..aac1bf332f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
- struct drm_gem_object *cur_obj;
+ struct drm_i915_gem_object *cur_obj;
};
struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1
struct drm_i915_fence_reg {
- struct drm_gem_object *obj;
struct list_head lru_list;
- bool gpu;
+ struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
};
struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
+struct intel_display_error_state;
+
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
u32 ipehr;
u32 instdone;
u32 acthd;
+ u32 error; /* gen6+ */
+ u32 bcs_acthd; /* gen6+ blt engine */
+ u32 bcs_ipehr;
+ u32 bcs_ipeir;
+ u32 bcs_instdone;
+ u32 bcs_seqno;
+ u32 vcs_acthd; /* gen6+ bsd engine */
+ u32 vcs_ipehr;
+ u32 vcs_ipeir;
+ u32 vcs_instdone;
+ u32 vcs_seqno;
u32 instpm;
u32 instps;
u32 instdone1;
u32 seqno;
u64 bbaddr;
+ u64 fence[16];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- } *active_bo;
- u32 active_bo_count;
+ u32 ring:4;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
};
struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 has_fbc : 1;
- u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
int has_gem;
+ int relative_constants_mode;
void __iomem *regs;
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
} *gmbus;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer render_ring;
- struct intel_ring_buffer bsd_ring;
- struct intel_ring_buffer blt_ring;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *seqno_obj;
- struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
struct resource mch_res;
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
- unsigned long debug_flags;
- wait_queue_head_t irq_queue;
atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
u32 trace_irq_seqno;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on Ironlake,
- irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
- u32 pch_irq_mask_reg;
- u32 pch_irq_enable_reg;
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
int num_pipe;
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
struct {
/** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
+ const struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
- struct drm_mm vram;
+ struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head gtt_list;
+ /** End of mappable part of GTT */
+ unsigned long gtt_mappable_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
- /**
- * Membership on list of all loaded devices, used to evict
- * inactive buffers under memory pressure.
- *
- * Modifications should only be done whilst holding the
- * shrink_list_lock spinlock.
- */
- struct list_head shrink_list;
+ struct shrinker inactive_shrinker;
/**
* List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- uint32_t flush_rings;
-
/* accounting, useful for userland debugging */
- size_t object_memory;
- size_t pin_memory;
- size_t gtt_memory;
size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
u32 object_count;
- u32 pin_count;
- u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
u8 fmax;
u8 fstart;
- u64 last_count1;
- unsigned long last_time1;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
} drm_i915_private_t;
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
+ /** This object's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
/**
* This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
unsigned int dirty : 1;
/**
+ * This is set if the object has been written to since the last
+ * GPU flush.
+ */
+ unsigned int pending_gpu_write : 1;
+
+ /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
signed int fence_reg : 5;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- unsigned int in_execbuffer : 1;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv : 2;
/**
- * Refcount for the pages array. With the current locking scheme, there
- * are at most two concurrent users: Binding a bo to the gtt and
- * pwrite/pread using physical addresses. So two bits for a maximum
- * of two users are enough.
- */
- unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
- /**
* Current tiling mode for the object.
*/
unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
- /** AGP memory structure for our GTT binding. */
- DRM_AGP_MEM *agp_mem;
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable : 1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+ * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable : 1;
+ unsigned int pin_mappable : 1;
+
+ /*
+ * Is the GPU currently using a fence to access this buffer,
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
struct page **pages;
/**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
+ * DMAR support
*/
- uint32_t gtt_offset;
+ struct scatterlist *sg_list;
+ int num_sg;
- /* Which ring is refering to is this object */
- struct intel_ring_buffer *ring;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
/**
- * Fake offset for use by mmap(2)
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
*/
- uint64_t mmap_offset;
+ uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
+ struct intel_ring_buffer *ring;
+
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+ struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4);
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
+void i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+ bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+ bool purgeable_only);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
- int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
- int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
#endif
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
- == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = read##y(dev_priv->regs + reg); \
+ trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
+
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+
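POSTING_READ is the idiom for flushing a posted mmio write; a sketch of the common pattern (reg and val are placeholders):

I915_WRITE(reg, val);	/* traced write */
POSTING_READ(reg);	/* untraced read-back forces the write out to the
			 * device before any following delay or dependent
			 * access; the value read is discarded */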
+
+/* On the SNB platform, the forcewake bit must be set before reading
+ * ring registers, to keep the GT core from powering down and
+ * returning stale values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
- val = readl(dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_READ)
- printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ if (dev_priv->info->gen >= 6) {
+ __gen6_force_wake_get(dev_priv);
+ val = I915_READ(reg);
+ __gen6_force_wake_put(dev_priv);
+ } else {
+ val = I915_READ(reg);
+ }
+
return val;
}
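Callers that may run on Gen6 are expected to use the wrapper instead of a bare I915_READ; for example (ACTHD_I965 is just an illustrative register from i915_reg.h):

u32 acthd = i915_safe_read(dev_priv, ACTHD_I965);
/* on gen6 the read is bracketed by forcewake get/put so the GT stays
 * powered; on earlier parts it degenerates to a plain I915_READ */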
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
- u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
- writel(val, dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_WRITE)
- printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+ /* Trace the write before it is actually performed */
+ trace_i915_reg_rw('W', reg, val, len);
+ switch (len) {
+ case 8:
+ writeq(val, dev_priv->regs + reg);
+ break;
+ case 4:
+ writel(val, dev_priv->regs + reg);
+ break;
+ case 2:
+ writew(val, dev_priv->regs + reg);
+ break;
+ case 1:
+ writeb(val, dev_priv->regs + reg);
+ break;
+ }
}
-#define I915_READ(reg) i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg) readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg) (void)I915_READ(reg)
-#define POSTING_READ16(reg) (void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
- I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
- I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
-} while (0)
-
-
-#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
-} while(0)
-
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (dev_priv->render_ring.status_page.page_addr))[reg])
+ (LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
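A sketch of how the breadcrumb is consumed, mirroring the classic wait-for-irq polling loop (irq_nr is a placeholder sequence number):

while (READ_BREADCRUMB(dev_priv) < irq_nr)
	cpu_relax();	/* dword 0x21 of the status page is written by the
			 * command streamer, so no mmio read is needed */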
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6ed43a..c79c0b62ef6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#include <linux/intel-gtt.h>
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
- bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+ struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask);
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count++;
- dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count--;
- dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count++;
- dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count--;
- dev_priv->mm.pin_memory -= size;
-}
-
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
@@ -140,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
return -EIO;
}
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -163,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
}
static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
+ return obj->gtt_space && !obj->active && obj->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (start >= end ||
- (start & (PAGE_SIZE - 1)) != 0 ||
- (end & (PAGE_SIZE - 1)) != 0) {
- return -EINVAL;
- }
-
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
dev_priv->mm.gtt_total = end - start;
-
- return 0;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
}
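A worked example of the new mappable accounting, with hypothetical sizes:

/* i915_gem_do_init(dev, 0, 256*1024*1024, 512*1024*1024) yields:
 *   mm.gtt_total          = 512M - 0            = 512 MiB
 *   mm.mappable_gtt_total = min(512M, 256M) - 0 = 256 MiB
 *   mm.gtt_mappable_end   = 256 MiB
 * i.e. only the first half of the GTT is reachable through the
 * CPU-visible aperture.
 */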
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_init *args = data;
- int ret;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+ i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ pinned = 0;
mutex_lock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
return 0;
}
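With the pinned-list walk above, the value reported to userspace is simply total minus currently pinned bytes; e.g. (hypothetical figures):

/* gtt_total = 512 MiB, 24 MiB of pinned objects (scanout, ringbuffers):
 *   args->aper_size           = 512 MiB
 *   args->aper_available_size = 512 MiB - 24 MiB = 488 MiB
 */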
-
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
u32 handle;
@@ -242,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOMEM;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj);
return ret;
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
args->handle = handle;
return 0;
}
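The reference flow here is worth spelling out (a sketch; the allocation itself sits just above this hunk):

/* object allocated with refcount 1 (the allocation reference)
 * drm_gem_handle_create(file, &obj->base, &handle)  -> refcount 2
 * drm_gem_object_unreference(&obj->base)            -> refcount 1,
 *   now held solely by the userspace handle; a later GEM_CLOSE or
 *   file release drops the final reference.
 */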
-static inline int
-fast_shmem_read(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+ obj->tiling_mode != I915_TILING_NONE;
}
static inline void
@@ -356,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
* fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
*/
static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -398,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
- int ret;
-
- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
- /* If we've insufficient memory to map in the pages, attempt
- * to make some space by throwing out some old buffers.
- */
- if (ret == -ENOMEM) {
- struct drm_device *dev = obj->dev;
-
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
- if (ret)
- return ret;
-
- ret = i915_gem_object_get_pages(obj, 0);
- }
-
- return ret;
-}
-
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
* and not take page faults.
*/
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
@@ -479,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
@@ -512,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
} else {
slow_shmem_copy(user_pages[data_page_index],
data_page_offset,
- obj_priv->pages[shmem_page_index],
+ page,
shmem_page_offset,
page_length);
}
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -525,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
+ mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
}
drm_free_large(user_pages);
@@ -539,11 +486,10 @@ out:
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (args->size == 0)
@@ -563,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check source. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
-out_put:
- i915_gem_object_put_pages(obj);
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -645,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping,
io_mapping_unmap(dst_vaddr);
}
-static inline int
-fast_shmem_write(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -680,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -721,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
*/
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -762,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto out_unpin_pages;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin_pages;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin_pages;
+
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -813,39 +739,58 @@ out_unpin_pages:
* copy_from_user into the kmapped pages backing the object.
*/
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_write(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_from_user_inatomic(vaddr + page_offset,
+ user_data,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -864,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* struct_mutex is held.
*/
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
+ int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
int ret;
@@ -912,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length,
0);
} else {
- slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length);
}
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -974,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
@@ -995,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check destination. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
@@ -1014,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- obj_priv->gtt_space &&
- obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0);
+ else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
@@ -1034,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
- i915_gem_object_put_pages(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1065,12 +1016,10 @@ unlock:
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1095,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
-
- intel_mark_busy(dev, obj);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- /* Update the LRU on the fence for the CPU access that's
- * about to occur.
- */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
-
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1127,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1142,10 +1074,10 @@ unlock:
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
/* Pinned buffers may be scanout, so flush the cache */
- if (to_intel_bo(obj)->pin_count)
+ if (obj->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1180,8 +1112,9 @@ unlock:
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
loff_t offset;
@@ -1190,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ if (obj->size > dev_priv->mm.gtt_mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
offset = args->offset;
down_write(&current->mm->mmap_sem);
@@ -1228,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1243,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
- if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
-
- /* Need a new fence register? */
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, true);
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL, true);
+ if (ret)
+ goto unlock;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1272,11 +1217,12 @@ unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
- case -EAGAIN:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
@@ -1295,37 +1241,39 @@ unlock:
* This routine allocates and attaches a fake offset for @obj.
*/
static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
/* Set the object up for mmap'ing */
- list = &obj->map_list;
+ list = &obj->base.map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
- map->size = obj->size;
+ map->size = obj->base.size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->base.size / PAGE_SIZE,
+ 0, 0);
if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ DRM_ERROR("failed to allocate offset for bo %d\n",
+ obj->base.name);
ret = -ENOSPC;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
+ obj->base.size / PAGE_SIZE,
+ 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
@@ -1338,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
goto out_free_mm;
}
- /* By now we should be all set, any drm_mmap request on the offset
- * below will get to our mmap & fault handler */
- obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
+ list->map = NULL;
return ret;
}
@@ -1367,38 +1312,51 @@ out_free_list:
* fixup by i915_gem_fault().
*/
void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ if (!obj->fault_mappable)
+ return;
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping,
- obj_priv->mmap_offset, obj->size, 1);
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT,
+ obj->base.size, 1);
+
+ obj->fault_mappable = false;
}
static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
+ struct drm_map_list *list = &obj->base.map_list;
- list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t size;
- if (list->map) {
- kfree(list->map);
- list->map = NULL;
- }
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return obj->base.size;
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
- obj_priv->mmap_offset = 0;
+ return size;
}
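Worked examples of the power-of-two fence sizing (object sizes are hypothetical):

/* gen >= 4 or untiled:           size == obj->base.size (no rounding)
 * gen3, tiled, 300 KiB object:   start at 1 MiB, already large enough -> 1 MiB
 * gen2, tiled, 300 KiB object:   start at 512 KiB                     -> 512 KiB
 * gen2, tiled, 600 KiB object:   512 KiB doubles once                 -> 1 MiB
 */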
/**
@@ -1406,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int start, i;
+ struct drm_device *dev = obj->base.dev;
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (INTEL_INFO(dev)->gen == 3)
- start = 1024*1024;
- else
- start = 512*1024;
+ return i915_gem_get_gtt_size(obj);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ * unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ int tile_height;
- for (i = start; i < obj->size; i <<= 1)
- ;
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return 4096;
- return i;
+ /*
+ * Older chips need unfenced tiled buffers to be aligned to the left
+ * edge of an even tile row (where tile rows are counted as if the bo is
+ * placed in a fenced gtt region).
+ */
+ if (IS_GEN2(dev) ||
+ (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+
+ return tile_height * obj->stride * 2;
}
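A worked example for the unfenced alignment (hypothetical stride):

/* Y-tiled bo with a 512-byte stride on gen2, or on any part with
 * 128-byte-wide Y tiles: tile_height = 32, so the bo must be placed at
 * 32 * 512 * 2 = 32 KiB granularity, the left edge of an even tile row.
 * Other tilings use tile_height = 8, i.e. 8 * stride * 2.
 */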
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ ret = -E2BIG;
+ goto unlock;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (!obj_priv->mmap_offset) {
+ if (!obj->base.map_list.map) {
ret = i915_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- args->offset = obj_priv->mmap_offset;
-
- /*
- * Pull it into the GTT so that we have a page list (makes the
- * initial fault faster and any subsequent flushing possible).
- */
- if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto out;
- }
+ args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask)
+{
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_RECLAIMABLE |
+ gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ obj->pages[i] = page;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+}
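The error path is the standard acquire-N-or-unwind idiom; in generic form (acquire()/release() are hypothetical helpers, not kernel API):

for (i = 0; i < n; i++) {
	res[i] = acquire(i);
	if (res[i] == NULL)
		goto err;	/* index i acquired nothing to undo */
}
return 0;

err:
	while (i--)		/* walks back over res[0..i-1] only */
		release(res[i]);
	return -ENOMEM;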
+
static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size / PAGE_SIZE;
+ int page_count = obj->base.size / PAGE_SIZE;
int i;
- BUG_ON(obj_priv->pages_refcount == 0);
- BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
-
- if (--obj_priv->pages_refcount != 0)
- return;
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- if (obj_priv->madv == I915_MADV_DONTNEED)
- obj_priv->dirty = 0;
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj->dirty)
+ set_page_dirty(obj->pages[i]);
- if (obj_priv->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj_priv->pages[i]);
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(obj->pages[i]);
- page_cache_release(obj_priv->pages[i]);
+ page_cache_release(obj->pages[i]);
}
- obj_priv->dirty = 0;
+ obj->dirty = 0;
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
}
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- ring->outstanding_lazy_request = true;
- return dev_priv->next_seqno;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL);
- obj_priv->ring = ring;
+ obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
- list_move_tail(&obj_priv->ring_list, &ring->active_list);
- obj_priv->last_rendering_seqno = seqno;
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+ if (obj->fenced_gpu_access) {
+ struct drm_i915_fence_reg *reg;
+
+ BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ }
}
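After this change the caller supplies the seqno explicitly instead of move_to_active fetching it; the flushing-list walk later in this patch does exactly that:

i915_gem_object_move_to_active(obj, ring,
			       i915_gem_next_request_seqno(dev, ring));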
static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
- list_del_init(&obj_priv->ring_list);
- obj_priv->last_rendering_seqno = 0;
+ BUG_ON(!obj->active);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+ i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (obj->pin_count != 0)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ obj->pending_gpu_write = false;
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
}
/* Immediately discard the backing storage */
static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
/* Our goal here is to return as much of the memory as
@@ -1600,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
* backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store.
*/
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = obj->base.filp->f_path.dentry->d_inode;
truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
- obj_priv->madv = __I915_MADV_PURGED;
+ obj->madv = __I915_MADV_PURGED;
}
static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
- return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count != 0)
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- list_del_init(&obj_priv->ring_list);
-
- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
- obj_priv->last_rendering_seqno = 0;
- obj_priv->ring = NULL;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- WARN_ON(i915_verify_lists(dev));
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
@@ -1643,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv, *next;
+ struct drm_i915_gem_object *obj, *next;
- list_for_each_entry_safe(obj_priv, next,
+ list_for_each_entry_safe(obj, next,
&ring->gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = &obj_priv->base;
+ if (obj->base.write_domain & flush_domains) {
+ uint32_t old_write_domain = obj->base.write_domain;
- if (obj->write_domain & flush_domains) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring);
-
- /* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_active(obj, ring,
+ i915_gem_next_request_seqno(dev, ring));
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
}
}
-uint32_t
+int
i915_add_request(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_request *request,
@@ -1683,17 +1699,17 @@ i915_add_request(struct drm_device *dev,
struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
+ int ret;
+
+ BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
- if (request == NULL) {
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
- }
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+ return ret;
- seqno = ring->add_request(dev, ring, 0);
ring->outstanding_lazy_request = false;
request->seqno = seqno;
@@ -1717,26 +1733,7 @@ i915_add_request(struct drm_device *dev,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
- uint32_t flush_domains = 0;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (INTEL_INFO(dev)->gen >= 4)
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
- ring->flush(dev, ring,
- I915_GEM_DOMAIN_COMMAND, flush_domains);
+ return 0;
}
static inline void
@@ -1769,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct drm_i915_gem_object *obj = reg->obj;
+
+ if (!obj)
+ continue;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
}
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int i;
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
}
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj_priv,
+ list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
- obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
/* The fence registers are invalidated so clear them out */
- for (i = 0; i < 16; i++) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- continue;
-
- i915_gem_clear_fence_reg(reg->obj);
- }
+ i915_gem_reset_fences(dev);
}
/**
@@ -1836,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
+ int i;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
@@ -1843,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
WARN_ON(i915_verify_lists(dev));
- seqno = ring->get_seqno(dev, ring);
+ seqno = ring->get_seqno(ring);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (seqno >= ring->sync_seqno[i])
+ ring->sync_seqno[i] = 0;
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -1865,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
break;
- obj = &obj_priv->base;
- if (obj->write_domain != 0)
+ if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (unlikely(dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- ring->user_irq_put(dev, ring);
+ ring->irq_put(ring);
dev_priv->trace_irq_seqno = 0;
}
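
The retire loop above decides completion with i915_seqno_passed(), which is wraparound-safe because the comparison is done on the signed 32-bit difference of the two seqnos. A minimal standalone sketch of that property (the real helper lives in i915_drv.h; the definition below is a reconstruction for illustration, not a quote of the header):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if seq1 is at or after seq2, even across 32-bit wraparound. */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(10, 5));          /* 1: plainly later */
        printf("%d\n", seqno_passed(5, 10));          /* 0: not yet */
        printf("%d\n", seqno_passed(3, 0xfffffff0u)); /* 1: later, across the wrap */
        return 0;
    }
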
@@ -1895,24 +1910,24 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj_priv, *tmp;
+ struct drm_i915_gem_object *obj, *next;
/* We must be careful that during unbind() we do not
* accidentally infinitely recurse into retire requests.
* Currently:
* retire -> free -> unbind -> wait -> retire_ring
*/
- list_for_each_entry_safe(obj_priv, tmp,
+ list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list,
mm_list)
- i915_gem_free_object_tail(&obj_priv->base);
+ i915_gem_free_object_tail(obj);
}
- i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
}
static void
@@ -1934,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
- (!list_empty(&dev_priv->render_ring.request_list) ||
- !list_empty(&dev_priv->bsd_ring.request_list) ||
- !list_empty(&dev_priv->blt_ring.request_list)))
+ (!list_empty(&dev_priv->ring[RCS].request_list) ||
+ !list_empty(&dev_priv->ring[VCS].request_list) ||
+ !list_empty(&dev_priv->ring[BCS].request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -1954,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- if (ring->outstanding_lazy_request) {
- seqno = i915_add_request(dev, NULL, NULL, ring);
- if (seqno == 0)
+ if (seqno == ring->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
return -ENOMEM;
+
+ ret = i915_add_request(dev, NULL, request, ring);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
}
- BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1975,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_begin(dev, seqno);
- ring->waiting_gem_seqno = seqno;
- ring->user_irq_get(dev, ring);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
+ ring->waiting_seqno = seqno;
+ if (ring->irq_get(ring)) {
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
- ring->waiting_gem_seqno = 0;
+ ring->irq_put(ring);
+ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
+ ret = -EBUSY;
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
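
When irq_get() fails (no interrupt source available), the code above falls back to wait_for(COND, 3000), a polling macro from intel_drv.h that re-tests the condition roughly once a millisecond until the timeout (in ms) expires. A userspace approximation of that pattern, purely illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Rough analogue of the driver's wait_for(COND, MS): poll about once
     * per millisecond; 0 on success, -1 on timeout (the caller above maps
     * a timeout to -EBUSY). */
    static int wait_for_cond(bool (*cond)(void), int ms)
    {
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

        while (ms-- > 0) {
            if (cond())
                return 0;
            nanosleep(&delay, NULL);
        }
        return cond() ? 0 : -1;
    }

    static bool ready(void) { return true; }

    int main(void)
    {
        printf("%d\n", wait_for_cond(ready, 3000)); /* 0 */
        return 0;
    }
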
@@ -1998,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
@@ -2023,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
return i915_do_wait_request(dev, seqno, 1, ring);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- struct drm_file *file_priv,
- struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- ring->flush(dev, ring, invalidate_domains, flush_domains);
- i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- if (flush_rings & RING_RENDER)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->render_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BSD)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->bsd_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BLT)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->blt_ring,
- invalidate_domains, flush_domains);
- }
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
- BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
- if (obj_priv->active) {
+ if (obj->active) {
ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
+ obj->last_rendering_seqno,
interruptible,
- obj_priv->ring);
+ obj->ring);
if (ret)
return ret;
}
@@ -2098,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return 0;
- if (obj_priv->pin_count != 0) {
+ if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
@@ -2131,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/
if (ret) {
i915_gem_clflush_object(obj);
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
/* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTARTSYS)
+ return ret;
- i915_gem_object_put_pages(obj);
- BUG_ON(obj_priv->pages_refcount);
+ i915_gem_gtt_unbind_object(obj);
+ i915_gem_object_put_pages_gtt(obj);
- i915_gem_info_remove_gtt(dev_priv, obj->size);
- list_del_init(&obj_priv->mm_list);
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- obj_priv->gtt_offset = 0;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj_priv))
+ if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ ring->flush(ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
- i915_gem_flush_ring(dev, NULL, ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (!list_empty(&ring->gpu_write_list))
+ i915_gem_flush_ring(dev, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
@@ -2177,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- int ret;
+ int ret, i;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev)
return 0;
/* Flush everything onto the inactive list. */
- ret = i915_ring_idle(dev, &dev_priv->render_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->blt_ring);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
-
- BUG_ON(obj_priv->pages_refcount
- == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
- if (obj_priv->pages_refcount++ != 0)
- return 0;
-
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
- if (obj_priv->pages == NULL) {
- obj_priv->pages_refcount--;
- return -ENOMEM;
- }
-
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj_priv->pages[i] = page;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ if (ret)
+ return ret;
}
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_do_bit_17_swizzle(obj);
-
return 0;
-
-err_pages:
- while (i--)
- page_cache_release(obj_priv->pages[i]);
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
- obj_priv->pages_refcount--;
- return PTR_ERR(page);
}
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+ return 0;
}
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+ return 0;
}
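
For gen4 and later the fence is a single 64-bit value: the high dword holds the (page-aligned) end address of the fenced range, the low dword its start, with pitch, tiling and valid bits packed into the low-order bits. A standalone sketch of that packing; the three constants mirror i915_reg.h as of this patch and should be treated as illustrative rather than authoritative:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed values, copied in only for the demo */
    #define I965_FENCE_PITCH_SHIFT    2
    #define I965_FENCE_TILING_Y_SHIFT 1
    #define I965_FENCE_REG_VALID      (1 << 0)

    /* Mirrors the val computation in i965_write_fence_reg() above. */
    static uint64_t i965_fence_val(uint32_t gtt_offset, uint32_t size,
                                   uint32_t stride, int tiling_y)
    {
        uint64_t val;

        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= ((stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
        if (tiling_y)
            val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;
        return val;
    }

    int main(void)
    {
        /* hypothetical object: 1MiB at GTT offset 16MiB, 4096-byte stride */
        printf("0x%016llx\n", (unsigned long long)
               i965_fence_val(16u << 20, 1u << 20, 4096, 0));
        return 0;
    }

The gen6 variant above uses the same layout with the pitch field relocated (SANDYBRIDGE_FENCE_PITCH_SHIFT).
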
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ u32 fence_reg, val, pitch_val;
int tile_width;
- uint32_t fence_reg, val;
- uint32_t pitch_val;
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
- __func__, obj_priv->gtt_offset, obj->size);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size))
+ return -EINVAL;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
/* Note: the pitch must be a power-of-two number of tile widths */
- pitch_val = obj_priv->stride / tile_width;
+ pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- else
- WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
+ val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
+ fence_reg = obj->fence_reg;
+ if (fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, fence_reg);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(fence_reg, val);
+
+ return 0;
}
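
Two pieces of arithmetic in the gen2/3 paths deserve a second look: (size & -size) != size rejects any size that is not a power of two (a power of two equals its own lowest set bit), and ffs(stride / tile_width) - 1 encodes the pitch as the log2 of the number of tile widths. A standalone check, illustrative only:

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h> /* ffs() */

    static int is_pot(uint32_t size)
    {
        return size && (size & -size) == size;
    }

    /* Mirrors the pitch_val computation in i915_write_fence_reg() above. */
    static uint32_t pitch_val(uint32_t stride, uint32_t tile_width)
    {
        return ffs(stride / tile_width) - 1;
    }

    int main(void)
    {
        printf("%d %d\n", is_pot(1u << 20), is_pot(0x180000)); /* 1 0 */
        /* X-tiled, 512-byte tile width: 2048 = 4 tile widths -> log2 = 2 */
        printf("%u\n", pitch_val(2048, 512));                  /* 2 */
        return 0;
    }
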
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint32_t val;
uint32_t pitch_val;
- uint32_t fence_size_bits;
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj_priv->gtt_offset);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size))
+ return -EINVAL;
- pitch_val = obj_priv->stride / 128;
+ pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
- WARN_ON(fence_size_bits & ~0x00000f00);
- val |= fence_size_bits;
+ val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+ return 0;
}
-static int i915_find_fence_reg(struct drm_device *dev,
- bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL, true);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *obj_priv = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = NULL;
- int i, avail, ret;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
/* First try to find a free reg */
- avail = 0;
+ avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
- return i;
+ return reg;
- obj_priv = to_intel_bo(reg->obj);
- if (!obj_priv->pin_count)
- avail++;
+ if (!reg->obj->pin_count)
+ avail = reg;
}
- if (avail == 0)
- return -ENOSPC;
+ if (avail == NULL)
+ return NULL;
/* None available, try to steal one or wait for a user to finish */
- i = I915_FENCE_REG_NONE;
- list_for_each_entry(reg, &dev_priv->mm.fence_list,
- lru_list) {
- obj = reg->obj;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count)
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->obj->pin_count)
continue;
- /* found one! */
- i = obj_priv->fence_reg;
- break;
- }
+ if (first == NULL)
+ first = reg;
- BUG_ON(i == I915_FENCE_REG_NONE);
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
+ }
- /* We only have a reference on obj from the active list. put_fence_reg
- * might drop that one, causing a use-after-free in it. So hold a
- * private reference to obj like the other callers of put_fence_reg
- * (set_tiling ioctl) do. */
- drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- drm_gem_object_unreference(obj);
- if (ret != 0)
- return ret;
+ if (avail == NULL)
+ avail = first;
- return i;
+ return avail;
}
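
The selection policy above is: any register with no object wins immediately; otherwise take the least-recently-used unpinned register, preferring one whose last fenced activity was on the requesting ring so that a pipelined update need not stall. A toy standalone model of that policy (the field names and the LRU-ordered array are simplifications, not the driver's data structures):

    #include <stdio.h>

    struct fence {
        int in_use;    /* reg->obj != NULL */
        int pinned;    /* reg->obj->pin_count */
        int last_ring; /* ring of last fenced access, 0 == none */
    };

    /* Same shape as i915_find_fence_reg(); regs[] is assumed to be in
     * LRU order, oldest first. Returns an index, or -1 for no space. */
    static int find_fence(const struct fence *regs, int n, int pipelined)
    {
        int i, first = -1;

        for (i = 0; i < n; i++)
            if (!regs[i].in_use)
                return i;

        for (i = 0; i < n; i++) {
            if (regs[i].pinned)
                continue;
            if (first < 0)
                first = i;
            if (!pipelined || !regs[i].last_ring ||
                regs[i].last_ring == pipelined)
                return i;
        }
        return first; /* may be -1: everything is pinned */
    }

    int main(void)
    {
        struct fence regs[] = {
            { 1, 1, 1 }, /* pinned: never stolen */
            { 1, 0, 2 }, /* oldest unpinned, last fenced on ring 2 */
            { 1, 0, 1 }, /* unpinned, last fenced on ring 1 */
        };
        printf("%d\n", find_fence(regs, 3, 1)); /* 2: matches the ring */
        printf("%d\n", find_fence(regs, 3, 0)); /* 1: plain LRU choice */
        return 0;
    }
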
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: whether a wait for the register to retire may be interrupted
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_fence_reg *reg;
int ret;
- /* Just update our place in the LRU if our fence is getting used. */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ /* XXX disable pipelining. There are bugs. Shocking. */
+ pipelined = NULL;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ reg->setup_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj,
+ pipelined,
+ interruptible);
+ if (ret)
+ return ret;
+ } else if (obj->tiling_changed) {
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ BUG_ON(!pipelined && reg->setup_seqno);
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(dev, pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
return 0;
}
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj_priv->gtt_offset);
- break;
- }
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -ENOSPC;
- ret = i915_find_fence_reg(dev, interruptible);
- if (ret < 0)
+ ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ if (ret)
return ret;
- obj_priv->fence_reg = ret;
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ ret = i915_gem_object_flush_fence(old,
+ pipelined,
+ interruptible);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+ obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
- sandybridge_write_fence_reg(reg);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
break;
case 5:
case 4:
- i965_write_fence_reg(reg);
+ ret = i965_write_fence_reg(obj, pipelined);
break;
case 3:
- i915_write_fence_reg(reg);
+ ret = i915_write_fence_reg(obj, pipelined);
break;
case 2:
- i830_write_fence_reg(reg);
+ ret = i830_write_fence_reg(obj, pipelined);
break;
}
- trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
- obj_priv->tiling_mode);
-
- return 0;
+ return ret;
}
/**
@@ -2521,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
*/
static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg)
{
- struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- uint32_t fence_reg;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
- (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
- if (obj_priv->fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
- reg->obj = NULL;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg;
-
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->gpu) {
- int ret;
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj, interruptible);
- if (ret)
- return ret;
-
- reg->gpu = false;
- }
-
- i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg(obj);
-
- return 0;
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
int ret;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
+ fence_size = i915_gem_get_gtt_size(obj);
+ fence_alignment = i915_gem_get_gtt_alignment(obj);
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
}
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev_priv->mm.gtt_total) {
+ if (obj->base.size >
+ (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL)
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space == NULL) {
+ if (map_and_fenceable)
+ free_space =
+ drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ size, alignment, 0);
+
+ if (free_space != NULL) {
+ if (map_and_fenceable)
+ obj->gtt_space =
+ drm_mm_get_block_range_generic(free_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ obj->gtt_space =
+ drm_mm_get_block(free_space, size, alignment);
+ }
+ if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, size, alignment,
+ map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- ret = i915_gem_object_get_pages(obj, gfpmask);
+ ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ ret = i915_gem_evict_something(dev, size,
+ alignment,
+ map_and_fenceable);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2685,122 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return ret;
}
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->pages,
- obj->size >> PAGE_SHIFT,
- obj_priv->gtt_space->start,
- obj_priv->agp_type);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_put_pages(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_gtt_bind_object(obj);
+ if (ret) {
+ i915_gem_object_put_pages_gtt(obj);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+
+ ret = i915_gem_evict_something(dev, size,
+ alignment, map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- /* keep track of the bound object by adding it to the inactive list */
- list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- i915_gem_info_add_gtt(dev_priv, obj->size);
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ obj->gtt_offset = obj->gtt_space->start;
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ fenceable =
+ obj->gtt_space->size == fence_size &&
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+
+ obj->map_and_fenceable = mappable && fenceable;
+
+ trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
return 0;
}
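
map_and_fenceable, as computed above, is the conjunction of two properties of the final placement: the node must be exactly fence-sized and fence-aligned (fenceable), and it must end at or below the mappable boundary of the aperture (mappable). A standalone recomputation with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the test at the end of i915_gem_object_bind_to_gtt();
     * every value here is hypothetical. */
    static int map_and_fenceable(uint32_t start, uint32_t node_size,
                                 uint32_t obj_size, uint32_t fence_size,
                                 uint32_t fence_align, uint32_t mappable_end)
    {
        int fenceable = node_size == fence_size &&
                        (start & (fence_align - 1)) == 0;
        int mappable = start + obj_size <= mappable_end;

        return mappable && fenceable;
    }

    int main(void)
    {
        /* 1MiB object in a fence-sized, 1MiB-aligned node, 256MiB window */
        printf("%d\n", map_and_fenceable(16u << 20, 1u << 20, 1u << 20,
                                         1u << 20, 1u << 20, 256u << 20));
        /* same object placed above the mappable boundary */
        printf("%d\n", map_and_fenceable(300u << 20, 1u << 20, 1u << 20,
                                         1u << 20, 1u << 20, 256u << 20));
        return 0;
    }
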
void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->pages == NULL)
+ if (obj->pages == NULL)
return;
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- uint32_t old_write_domain;
+ struct drm_device *dev = obj->base.dev;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
/* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev, NULL,
- to_intel_bo(obj)->ring,
- 0, obj->write_domain);
- BUG_ON(obj->write_domain);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
-
- return 0;
+ i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ BUG_ON(obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ i915_gem_release_mmap(obj);
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ intel_gtt_chipset_flush();
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
@@ -2811,37 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
+ if (obj->pending_gpu_write || write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
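
The domain bookkeeping in this hunk is plain bit arithmetic on the uapi domain flags from include/drm/i915_drm.h (CPU is bit 0, GTT bit 6): a read-only move adds GTT to the read set, while a writable move collapses both sets to GTT alone. A standalone sketch, with the two flag values copied in for illustration:

    #include <stdio.h>

    #define I915_GEM_DOMAIN_CPU 0x00000001
    #define I915_GEM_DOMAIN_GTT 0x00000040

    struct bo { unsigned int read_domains, write_domain; };

    /* Mirrors the domain update in i915_gem_object_set_to_gtt_domain(). */
    static void set_to_gtt_domain(struct bo *bo, int write)
    {
        bo->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
            bo->read_domains = I915_GEM_DOMAIN_GTT;
            bo->write_domain = I915_GEM_DOMAIN_GTT;
        }
    }

    int main(void)
    {
        struct bo bo = { I915_GEM_DOMAIN_CPU, 0 };

        set_to_gtt_domain(&bo, 0);
        printf("read %#x write %#x\n", bo.read_domains, bo.write_domain);
        /* read 0x41 write 0: CPU-readable and now GTT-readable */

        set_to_gtt_domain(&bo, 1);
        printf("read %#x write %#x\n", bo.read_domains, bo.write_domain);
        /* read 0x40 write 0x40: exclusive to the GTT domain */
        return 0;
    }
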
@@ -2856,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as during the modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
/* Currently, we are always called from a non-interruptible context. */
- if (!pipelined) {
+ if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -2880,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
i915_gem_object_flush_cpu_write_domain(obj);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
+ obj->base.write_domain);
return 0;
}
@@ -2898,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
- return i915_gem_object_wait_rendering(&obj->base, interruptible);
+ return i915_gem_object_wait_rendering(obj, interruptible);
}
/**
@@ -2911,14 +2969,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
* flushes to occur.
*/
static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -2930,27 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
return 0;
}
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- uint32_t old_read_domains;
-
- intel_mark_busy(dev, obj);
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- (obj->write_domain != obj->pending_read_domains ||
- obj_priv->ring != ring)) {
- flush_domains |= obj->write_domain;
- invalidate_domains |=
- obj->pending_read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- old_read_domains = obj->read_domains;
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0)
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= obj_priv->ring->id;
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= ring->id;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
-}
-
/**
* Moves the object from a partially valid CPU read domain to a fully valid one.
*
@@ -3145,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (!obj_priv->page_cpu_valid)
+ if (!obj->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
int i;
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
+ for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
}
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
- kfree(obj_priv->page_cpu_valid);
- obj_priv->page_cpu_valid = NULL;
+ kfree(obj->page_cpu_valid);
+ obj->page_cpu_valid = NULL;
}
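
The range variant below tracks CPU visibility per page in a byte array and clflushes only pages not yet marked valid; the loop bounds are the inclusive page indices covering [offset, offset + size). The index math in isolation, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        /* same bounds as the flush loop in
         * i915_gem_object_set_cpu_read_domain_range() */
        uint64_t offset = 5000, size = 10000;
        uint64_t first = offset / PAGE_SIZE;
        uint64_t last = (offset + size - 1) / PAGE_SIZE;

        /* the 10000-byte range straddles pages 1..3 */
        printf("flush pages %llu..%llu\n",
               (unsigned long long)first, (unsigned long long)last);
        return 0;
    }
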
/**
@@ -3184,19 +3060,16 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
- if (offset == 0 && size == obj->size)
+ if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -3204,457 +3077,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ if (obj->page_cpu_valid == NULL &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj_priv->page_cpu_valid == NULL)
+ if (obj->page_cpu_valid == NULL) {
+ obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+ GFP_KERNEL);
+ if (obj->page_cpu_valid == NULL)
return -ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
- if (obj_priv->page_cpu_valid[i])
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
- obj_priv->page_cpu_valid[i] = 1;
+ obj->page_cpu_valid[i] = 1;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *reloc)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_gem_object *target_obj;
- uint32_t target_offset;
- int ret = -EINVAL;
-
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL)
- return -ENOENT;
-
- target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- goto err;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- goto err;
- }
-
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc->presumed_offset)
- goto out;
-
- /* Check that the relocation address is valid... */
- if (reloc->offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- (int) obj->base.size);
- goto err;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- goto err;
- }
-
- /* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- goto err;
- }
-
- reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
- char *vaddr;
-
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
- kunmap_atomic(vaddr);
- } else {
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- goto err;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
- iowrite32(reloc->delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
-
- /* and update the user's relocation entry */
- reloc->presumed_offset = target_offset;
-
-out:
- ret = 0;
-err:
- drm_gem_object_unreference(target_obj);
- return ret;
-}
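
/*
 * An aside on the multiple-write-domain test above: x & (x - 1)
 * clears the lowest set bit of x, so the expression is nonzero
 * exactly when more than one domain bit is set. The idiom spelled
 * out as a hypothetical helper (not part of the driver):
 */
static inline bool write_domain_is_multiple(uint32_t write_domain)
{
	return (write_domain & (write_domain - 1)) != 0;
}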
-
-static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
-{
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int i, ret;
-
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
-
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
- return -EFAULT;
-
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
- if (ret)
- return ret;
-
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- int i, ret;
-
- for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- int i, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object(obj, file,
- &exec_list[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i, retry;
-
- /* attempt to pin all of the buffers into the GTT */
- for (retry = 0; retry < 2; retry++) {
- ret = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
- struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
- bool need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
-
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(&obj->base,
- obj->tiling_mode)) {
- ret = i915_gem_object_unbind(&obj->base);
- if (ret)
- break;
- }
-
- ret = i915_gem_object_pin(&obj->base, entry->alignment);
- if (ret)
- break;
-
- /*
- * Pre-965 chips need a fence register set up in order
- * to properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(&obj->base, true);
- if (ret) {
- i915_gem_object_unpin(&obj->base);
- break;
- }
-
- dev_priv->fence_regs[obj->fence_reg].gpu = true;
- }
-
- entry->offset = obj->gtt_offset;
- }
-
- while (i--)
- i915_gem_object_unpin(object_list[i]);
-
- if (ret == 0)
- break;
-
- if (ret != -ENOSPC || retry)
- return ret;
-
- ret = i915_gem_evict_everything(dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
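
/*
 * The reservation loop above is a two-pass pattern: attempt to pin
 * every buffer, and on the first -ENOSPC evict the aperture wholesale
 * and try exactly once more. A minimal sketch of that control flow,
 * with hypothetical callbacks standing in for the driver's steps:
 */
static int reserve_with_retry(void *ctx,
			      int (*pin_all)(void *),
			      int (*evict_all)(void *))
{
	int retry, ret = 0;

	for (retry = 0; retry < 2; retry++) {
		ret = pin_all(ctx);
		if (ret != -ENOSPC || retry)
			break;		/* success, hard error, or second miss */
		ret = evict_all(ctx);	/* free up space, then retry once */
		if (ret)
			break;
	}
	return ret;
}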
-
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_gem_relocation_entry *reloc;
- int i, total, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->in_execbuffer = false;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- total = 0;
- for (i = 0; i < count; i++)
- total += exec_list[i].relocation_count;
-
- reloc = drm_malloc_ab(total, sizeof(*reloc));
- if (reloc == NULL) {
- mutex_lock(&dev->struct_mutex);
- return -ENOMEM;
- }
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
-
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
- if (copy_from_user(reloc+total, user_relocs,
- exec_list[i].relocation_count *
- sizeof(*reloc))) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- total += exec_list[i].relocation_count;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- count);
- if (ret)
- goto err;
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
- &exec_list[i],
- reloc + total);
- if (ret)
- goto err;
-
- total += exec_list[i].relocation_count;
- }
-
- /* Leave the user relocations as are, this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- drm_free_large(reloc);
- return ret;
-}
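
/*
 * The slow path above follows the classic drop-copy-relock dance:
 * release struct_mutex so copy_from_user may fault, stage all
 * relocations into a kernel buffer, then retake the lock before
 * touching GEM state again. In outline (illustrative names only;
 * the driver retakes the lock interruptibly):
 */
static int copy_relocs_unlocked(struct mutex *lock, void *dst,
				const void __user *src, size_t len)
{
	int ret = 0;

	mutex_unlock(lock);		/* faults are allowed from here */
	if (copy_from_user(dst, src, len))
		ret = -EFAULT;
	mutex_lock(lock);		/* GEM state may be touched again */
	return ret;
}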
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
- struct drm_file *file,
- struct intel_ring_buffer *ring,
- struct drm_gem_object **objects,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
- for (i = 0; i < count; i++)
- i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
- /* XXX replace with semaphores */
- if (obj->ring && ring != obj->ring) {
- ret = i915_gem_object_wait_rendering(&obj->base, true);
- if (ret)
- return ret;
- }
- }
+ obj->base.write_domain);
return 0;
}
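
/*
 * A compact restatement of the per-page bookkeeping at the top of
 * the function above: one byte per page records whether the CPU's
 * view of that page is valid, and any page still marked invalid is
 * clflushed before CPU reads begin. Illustrative only; the driver
 * keeps this state in obj->page_cpu_valid.
 */
static void flush_invalid_pages(u8 *page_valid, struct page **pages,
				unsigned long offset, unsigned long size)
{
	unsigned long i;

	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (page_valid[i])
			continue;			/* already coherent */
		drm_clflush_pages(pages + i, 1);	/* purge stale cachelines */
		page_valid[i] = 1;
	}
}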
@@ -3694,599 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
/* And wait for the seqno passing without holding any locks and
* causing extra latency for others. This is safe as the irq
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->user_irq_get(dev, ring);
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- }
-
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- return ret;
-}
-
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
-{
- uint32_t exec_start, exec_len;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- if ((exec_start | exec_len) & 0x7)
- return -EINVAL;
-
- if (!exec_start)
- return -EINVAL;
-
- return 0;
-}
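
/*
 * The batch sanity check above tests two values' low bits at once:
 * (exec_start | exec_len) & 0x7 is nonzero iff either value is not
 * 8-byte aligned. Spelled out as a hypothetical helper:
 */
static inline bool batch_range_misaligned(uint32_t exec_start,
					  uint32_t exec_len)
{
	return (exec_start & 7) || (exec_len & 7);
}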
-
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- int length; /* limited by fault_in_pages_readable() */
-
- /* First check for malicious input causing overflow */
- if (exec[i].relocation_count >
- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
- return -EINVAL;
-
- length = exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
- /* we may also need to update the presumed offsets */
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
- if (fault_in_pages_readable(ptr, length))
- return -EFAULT;
- }
-
- return 0;
-}
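
/*
 * The INT_MAX division above guards the later multiplication: by
 * bounding relocation_count to INT_MAX / sizeof(entry) first, the
 * product count * sizeof(entry) cannot overflow. The same guard as
 * a standalone sketch:
 */
static inline bool reloc_count_overflows(unsigned int count)
{
	return count > INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
}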
-
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec_list)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_request *request = NULL;
- int ret, i, flips;
- uint64_t exec_offset;
-
- struct intel_ring_buffer *ring = NULL;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
-
- ret = validate_exec_list(exec_list, args->buffer_count);
- if (ret)
- return ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->render_ring;
- break;
- case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
- ring = &dev_priv->bsd_ring;
- break;
- case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
- ring = &dev_priv->blt_ring;
- break;
- default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
- object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (object_list == NULL) {
- DRM_ERROR("Failed to allocate object list for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (args->num_cliprects != 0) {
- cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = copy_from_user(cliprects,
- (struct drm_clip_rect __user *)
- (uintptr_t) args->cliprects_ptr,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0) {
- DRM_ERROR("copy %d cliprects failed: %d\n",
- args->num_cliprects, ret);
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- }
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto pre_mutex_err;
-
- if (dev_priv->mm.suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
- /* Look up object handles */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -ENOENT;
- goto err;
- }
-
- obj_priv = to_intel_bo(object_list[i]);
- if (obj_priv->in_execbuffer) {
- DRM_ERROR("Object %p appears more than once in object list\n",
- object_list[i]);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -EINVAL;
- goto err;
- }
- obj_priv->in_execbuffer = true;
- }
-
- /* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret)
- goto err;
-
- /* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret) {
- if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file,
- object_list,
- exec_list,
- args->buffer_count);
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- }
- if (ret)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- if (batch_obj->pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- /* Sanity check the batch buffer */
- exec_offset = to_intel_bo(batch_obj)->gtt_offset;
- ret = i915_gem_check_execbuffer(args, exec_offset);
- if (ret != 0) {
- DRM_ERROR("execbuf with invalid offset/length\n");
- goto err;
- }
-
- ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
- object_list, args->buffer_count);
- if (ret)
- goto err;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- uint32_t old_write_domain = obj->write_domain;
- obj->write_domain = obj->pending_write_domain;
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
-#if WATCH_EXEC
- i915_gem_dump_object(batch_obj,
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]->write_domain)
- flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
- }
- if (flips) {
- int plane, flip_mask;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- }
-
- /* Exec the batchbuffer */
- ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- i915_retire_commands(dev, ring);
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- i915_gem_object_move_to_active(obj, ring);
- if (obj->write_domain)
- list_move_tail(&to_intel_bo(obj)->gpu_write_list,
- &ring->gpu_write_list);
- }
-
- i915_add_request(dev, file, request, ring);
- request = NULL;
-
-err:
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = to_intel_bo(object_list[i]);
- obj_priv->in_execbuffer = false;
- }
- drm_gem_object_unreference(object_list[i]);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
- drm_free_large(object_list);
- kfree(cliprects);
- kfree(request);
-
- return ret;
-}
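
/*
 * The pending-flip handling above walks the flip bitmask one plane
 * at a time and emits an MI_WAIT_FOR_EVENT per set bit, so the wait
 * stalls the ring rather than the CPU. The bit iteration isolated
 * below; emit_wait is a stand-in for the intel_ring_emit sequence:
 */
static void wait_for_pending_flips(int flips, void (*emit_wait)(int plane))
{
	int plane;

	for (plane = 0; flips >> plane; plane++)
		if ((flips >> plane) & 1)	/* this plane has a flip queued */
			emit_wait(plane);
}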
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret, i;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -ENOMEM;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
+ if (ring->irq_get(ring)) {
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->irq_put(ring);
- exec2.buffers_ptr = args->buffers_ptr;
- exec2.buffer_count = args->buffer_count;
- exec2.batch_start_offset = args->batch_start_offset;
- exec2.batch_len = args->batch_len;
- exec2.DR1 = args->DR1;
- exec2.DR4 = args->DR4;
- exec2.num_cliprects = args->num_cliprects;
- exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = I915_EXEC_RENDER;
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
}
}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- drm_free_large(exec2_list);
return ret;
}
int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
- if (obj_priv->gtt_space != NULL) {
- if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
if (ret)
return ret;
}
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- i915_gem_info_add_pin(dev_priv, obj->size);
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (obj->pin_count++ == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
+ obj->pin_mappable |= map_and_fenceable;
WARN_ON(i915_verify_lists(dev));
return 0;
}
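
/*
 * The pin bookkeeping above leans on increment placement:
 * obj->pin_count++ == 0 is true only for the first pin, and the
 * matching --obj->pin_count == 0 in unpin below is true only for
 * the last. A toy pair showing the pattern:
 */
static void pin(int *count, void (*on_first)(void))
{
	if ((*count)++ == 0)	/* post-increment: old value was zero */
		on_first();
}

static void unpin(int *count, void (*on_last)(void))
{
	if (--(*count) == 0)	/* pre-decrement: new value is zero */
		on_last();
}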
void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
WARN_ON(i915_verify_lists(dev));
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
- i915_gem_info_remove_pin(dev_priv, obj->size);
+ obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count++;
- obj_priv->pin_filp = file_priv;
- if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret)
goto out;
}
@@ -4295,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj_priv->gtt_offset;
+ args->offset = obj->gtt_offset;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4305,38 +3296,36 @@ unlock:
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4344,48 +3333,50 @@ unlock:
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj_priv->active;
+ args->busy = obj->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
- i915_gem_flush_ring(dev, file_priv,
- obj_priv->ring,
- 0, obj->write_domain);
- } else if (obj_priv->ring->outstanding_lazy_request) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ i915_gem_flush_ring(dev, obj->ring,
+ 0, obj->base.write_domain);
+ } else if (obj->ring->outstanding_lazy_request ==
+ obj->last_rendering_seqno) {
+ struct drm_i915_gem_request *request;
+
/* This ring is not being cleared by active usage,
* so emit a request to do so.
*/
- u32 seqno = i915_add_request(dev,
- NULL, NULL,
- obj_priv->ring);
- if (seqno == 0)
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request)
+ ret = i915_add_request(dev,
+ NULL, request,
+ obj->ring);
+ else
ret = -ENOMEM;
}
@@ -4394,12 +3385,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests_ring(dev, obj->ring);
- args->busy = obj_priv->active;
+ args->busy = obj->active;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4417,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
switch (args->madv) {
@@ -4433,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_count) {
+ if (obj->pin_count) {
ret = -EINVAL;
goto out;
}
- if (obj_priv->madv != __I915_MADV_PURGED)
- obj_priv->madv = args->madv;
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
/* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj_priv) &&
- obj_priv->gtt_space == NULL)
+ if (i915_gem_object_is_purgeable(obj) &&
+ obj->gtt_space == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ args->retained = obj->madv != __I915_MADV_PURGED;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -4486,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
- return &obj->base;
+ return obj;
}
int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->mm_list,
+ list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
- if (obj_priv->mmap_offset)
+ if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev_priv, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj_priv->page_cpu_valid);
- kfree(obj_priv->bit_17);
- kfree(obj_priv);
+ kfree(obj->page_cpu_valid);
+ kfree(obj->bit_17);
+ kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj);
- while (obj_priv->pin_count > 0)
+ while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj);
@@ -4562,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_inactive(dev, false);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
+ i915_gem_reset_fences(dev);
+
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
@@ -4587,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret)
- goto err_unref;
-
- dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
- dev_priv->seqno_page = kmap(obj_priv->pages[0]);
- if (dev_priv->seqno_page == NULL)
- goto err_unpin;
-
- dev_priv->seqno_obj = obj;
- memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = dev_priv->seqno_obj;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->seqno_obj = NULL;
-
- dev_priv->seqno_page = NULL;
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- return ret;
- }
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
- goto cleanup_pipe_control;
+ return ret;
if (HAS_BSD(dev)) {
ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
return 0;
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
return ret;
}
@@ -4694,12 +3618,10 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}
int
@@ -4707,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -4727,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+ BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+ }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4796,17 +3716,14 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- init_ring_lists(&dev_priv->render_ring);
- init_ring_lists(&dev_priv->bsd_ring);
- init_ring_lists(&dev_priv->blt_ring);
+ INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_completion(&dev_priv->error_completion);
- spin_lock(&shrink_list_lock);
- list_add(&dev_priv->mm.shrink_list, &shrink_list);
- spin_unlock(&shrink_list_lock);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4818,6 +3735,8 @@ i915_gem_load(struct drm_device *dev)
}
}
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
@@ -4849,6 +3768,10 @@ i915_gem_load(struct drm_device *dev)
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+ dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.inactive_shrinker);
}
/*
@@ -4918,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
}
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv;
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ char *vaddr;
int i;
- int ret;
int page_count;
- obj_priv = to_intel_bo(obj);
- if (!obj_priv->phys_obj)
+ if (!obj->phys_obj)
return;
+ vaddr = obj->phys_obj->handle->vaddr;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret)
- goto out;
-
- page_count = obj->size / PAGE_SIZE;
-
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i]);
- char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ struct page *page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ drm_clflush_pages(&page, 1);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
}
- drm_clflush_pages(obj_priv->pages, page_count);
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
- i915_gem_object_put_pages(obj);
-out:
- obj_priv->phys_obj->cur_obj = NULL;
- obj_priv->phys_obj = NULL;
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align)
{
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
int page_count;
int i;
@@ -4966,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->phys_obj) {
- if (obj_priv->phys_obj->id == id)
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
@@ -4977,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->base.size, align);
if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
- goto out;
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return ret;
}
}
/* bind to the object */
- obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj_priv->phys_obj->cur_obj = obj;
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("failed to get page list\n");
- goto out;
- }
-
- page_count = obj->size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i]);
- char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ struct page *page;
+ char *dst, *src;
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
- }
- i915_gem_object_put_pages(obj);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
return 0;
-out:
- return ret;
}
static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -5036,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return -EFAULT;
}
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return 0;
}
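
/*
 * The pwrite path above tries the atomic, non-caching copy first
 * and falls back to a faulting copy_from_user only when the user
 * pages are not resident. One plausible shape for that fallback,
 * sketched under the assumption that the destination stays valid
 * across the unlock (as a fixed phys object does):
 */
static int copy_with_fallback(void *vaddr, const void __user *user_data,
			      size_t size, struct mutex *lock)
{
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, size)) {
		unsigned long unwritten;

		mutex_unlock(lock);	/* allow the copy to fault pages in */
		unwritten = copy_from_user(vaddr, user_data, size);
		mutex_lock(lock);
		if (unwritten)
			return -EFAULT;
	}
	return 0;
}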
@@ -5074,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev)
}
static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
-{
- drm_i915_private_t *dev_priv, *next_dev;
- struct drm_i915_gem_object *obj_priv, *next_obj;
- int cnt = 0;
- int would_deadlock = 1;
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj, *next;
+ int cnt;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ return 0;
/* "fast-path" to count number of available objects */
if (nr_to_scan == 0) {
- spin_lock(&shrink_list_lock);
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (mutex_trylock(&dev->struct_mutex)) {
- list_for_each_entry(obj_priv,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- }
- }
- spin_unlock(&shrink_list_lock);
-
- return (cnt / 100) * sysctl_vfs_cache_pressure;
+ cnt = 0;
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
- spin_lock(&shrink_list_lock);
-
rescan:
/* first scan for clean buffers */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev);
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(&obj_priv->base);
- if (--nr_to_scan <= 0)
- break;
- }
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj)) {
+ if (i915_gem_object_unbind(obj) == 0 &&
+ --nr_to_scan == 0)
+ break;
}
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
-
- if (nr_to_scan <= 0)
- break;
}
/* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan > 0) {
- i915_gem_object_unbind(&obj_priv->base);
- nr_to_scan--;
- } else
- cnt++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
+ cnt = 0;
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (nr_to_scan &&
+ i915_gem_object_unbind(obj) == 0)
+ nr_to_scan--;
+ else
+ cnt++;
}
- if (nr_to_scan) {
- int active = 0;
-
+ if (nr_to_scan && i915_gpu_is_active(dev)) {
/*
* We are desperate for pages, so as a last resort, wait
* for the GPU to finish and discard whatever we can.
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (!mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- if (i915_gpu_is_active(dev)) {
- i915_gpu_idle(dev);
- active++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (active)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
-
- spin_unlock(&shrink_list_lock);
-
- if (would_deadlock)
- return -1;
- else if (cnt > 0)
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- else
- return 0;
-}
-
-static struct shrinker shrinker = {
- .shrink = i915_gem_shrink,
- .seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
- register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
- unregister_shrinker(&shrinker);
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
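
/*
 * The shrinker contract followed above, restated: a call with
 * nr_to_scan == 0 asks only for an estimate of freeable objects,
 * while a nonzero nr_to_scan asks the callback to free up to that
 * many and report what remains, scaled by vfs_cache_pressure. A
 * skeleton under that contract; count_freeable() and free_some()
 * are hypothetical stand-ins for the driver's accounting:
 */
static int count_freeable(void);	/* hypothetical */
static void free_some(int nr);		/* hypothetical */

static int toy_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan)
		free_some(nr_to_scan);	/* best effort, may free fewer */
	return count_freeable() / 100 * sysctl_vfs_cache_pressure;
}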
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8..29d014c48ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->pages[page],
+ i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
+ obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
+ __func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
+ gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+ backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
+ (int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc..78b8cf90c92 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
- list_add(&obj_priv->evict_list, unwind);
- drm_gem_object_reference(&obj_priv->base);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
+ list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
+ return drm_mm_scan_add_block(obj->gtt_space);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj_priv, &unwind_list))
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
+ if (obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- if (obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (! obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ list_for_each_entry(obj, &unwind_list, exec_list) {
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
- drm_gem_object_unreference(&obj_priv->base);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj_priv = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
- evict_list);
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- list_move(&obj_priv->evict_list, &eviction_list);
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
continue;
}
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj_priv = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
- evict_list);
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(&obj_priv->base);
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
return ret;
}
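
/*
 * The drm_mm scan protocol driving the function above, condensed:
 * initialise a scan for the requested size and alignment, feed
 * candidate blocks in LRU order until drm_mm_scan_add_block()
 * reports that a large-enough hole can be assembled, then remove
 * every scanned block again (newest first) and evict only those
 * drm_mm_scan_remove_block() marks as part of the hole. A sketch of
 * that shape; the driver additionally stages hole blocks on a
 * separate eviction list before unbinding:
 */
static int scan_and_evict(struct drm_mm *mm, struct list_head *lru,
			  int min_size, unsigned alignment,
			  int (*evict)(struct drm_i915_gem_object *))
{
	struct drm_i915_gem_object *obj, *next;
	LIST_HEAD(unwind);
	int ret = -ENOSPC;

	drm_mm_init_scan(mm, min_size, alignment);

	list_for_each_entry(obj, lru, mm_list) {
		list_add(&obj->exec_list, &unwind);	/* LIFO for unwind */
		if (drm_mm_scan_add_block(obj->gtt_space)) {
			ret = 0;			/* hole assembled */
			break;
		}
	}

	list_for_each_entry_safe(obj, next, &unwind, exec_list) {
		if (drm_mm_scan_remove_block(obj->gtt_space) && ret == 0)
			ret = evict(obj);		/* block is in the hole */
		list_del_init(&obj->exec_list);
	}
	return ret;
}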
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- BUG_ON(!lists_empty);
-
- return 0;
+ return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- mm_list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 00000000000..61129e6759e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1343 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+ uint32_t invalidate_domains;
+ uint32_t flush_domains;
+ uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ struct change_domains *cd)
+{
+ uint32_t invalidate_domains = 0, flush_domains = 0;
+
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (obj->base.pending_write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->base.write_domain &&
+ (((obj->base.write_domain != obj->base.pending_read_domains ||
+ obj->ring != ring)) ||
+ (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+ flush_domains |= obj->base.write_domain;
+ invalidate_domains |=
+ obj->base.pending_read_domains & ~obj->base.write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ /* blow away mappings if mapped through GTT */
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+ i915_gem_release_mmap(obj);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+ * of our domain changes in execbuffers (which clears objects'
+ * write_domains). So if we have a current write domain that we
+ * aren't changing, set pending_write_domain to that.
+ */
+ if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+ obj->base.pending_write_domain = obj->base.write_domain;
+
+ cd->invalidate_domains |= invalidate_domains;
+ cd->flush_domains |= flush_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= obj->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= ring->id;
+}
+
+struct eb_objects {
+ int and;
+ struct hlist_head buckets[0];
+};
+
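+/*
+ * eb_objects is a small hash table mapping execbuffer handles to their
+ * objects for O(1) lookup while applying relocations. The bucket count
+ * is rounded down to a power of two no larger than the buffer count
+ * (capped at half a page of hlist_heads), and "and" holds the matching
+ * index mask; e.g. with 4K pages and 8-byte list heads, a 100-buffer
+ * execbuffer gets 64 buckets and a mask of 0x3f.
+ */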
+static struct eb_objects *
+eb_create(int size)
+{
+ struct eb_objects *eb;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ while (count > size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_KERNEL);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct drm_i915_gem_object *obj;
+
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+ kfree(eb);
+}
+
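+/*
+ * Apply a single relocation: look up the target in the handle table,
+ * validate its GPU domains and the relocation offset/delta, then write
+ * the target's GTT address plus delta into the object (via kmap_atomic()
+ * when the object is in the CPU write domain, through the GTT aperture
+ * otherwise) and record the new presumed_offset for user space.
+ */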
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+ /* we already hold a reference to all valid objects */
+ target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+ if (unlikely(target_obj == NULL))
+ return -ENOENT;
+
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain)) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ return ret;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset > obj->base.size - 4)) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return ret;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return ret;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (unlikely(reloc->delta >= target_obj->size)) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ return ret;
+ }
+
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
+
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+ if (ret)
+ return ret;
+
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_exec_object2 *entry;
+ int ret, retry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+ * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence, need_mappable;
+
+ if (!obj->gtt_space) {
+ entry++;
+ continue;
+ }
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj);
+ else
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ goto err;
+
+ entry++;
+ }
+
+ /* Bind fresh objects */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ if (!obj->gtt_space) {
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ break;
+ }
+
+ if (has_fenced_gpu_access) {
+ if (need_fence) {
+ ret = i915_gem_object_get_fence(obj, ring, 1);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ break;
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ entry++;
+ }
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+ }
+
+ if (ret != -ENOSPC || retry > 1)
+ return ret;
+
+ /* First attempt, just clear anything that is purgeable.
+ * Second attempt, clear the entire GTT.
+ */
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ if (ret)
+ return ret;
+
+ retry++;
+ } while (1);
+
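+/*
+ * Reservation failed partway through the list: walk backwards from the
+ * object that failed, dropping the pin taken on every earlier object.
+ */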
+err:
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ while (objects != &obj->exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ }
+
+ return ret;
+}
+
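+/*
+ * Slow relocation path, taken when the atomic user copy faults: drop
+ * all object references and struct_mutex, copy every relocation entry
+ * from user space with faults allowed, then reacquire the objects and
+ * apply the relocations from the kernel copy.
+ */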
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct list_head *objects,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ INIT_LIST_HEAD(objects);
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+ if (ret)
+ goto err;
+
+ total = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ exec,
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec->relocation_count;
+ exec++;
+ }
+
+ /* Leave the user relocations as are, this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static void
+i915_gem_execbuffer_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ uint32_t flush_rings)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (flush_rings & (1 << i))
+ i915_gem_flush_ring(dev, &dev_priv->ring[i],
+ invalidate_domains,
+ flush_domains);
+ }
+}
+
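+/*
+ * Make the target ring wait for outstanding rendering on another ring.
+ * Gen6+ uses inter-ring semaphores (intel_ring_sync); older chips fall
+ * back to waiting on the CPU for the object's rendering to complete.
+ */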
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (INTEL_INFO(obj->base.dev)->gen < 6)
+ return i915_gem_object_wait_rendering(obj, true);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ if (seqno == from->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ret = i915_add_request(obj->base.dev, NULL, request, from);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
+ }
+
+ from->sync_seqno[idx] = seqno;
+ return intel_ring_sync(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct change_domains cd;
+ int ret;
+
+ cd.invalidate_domains = 0;
+ cd.flush_domains = 0;
+ cd.flush_rings = 0;
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+ if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ cd.invalidate_domains,
+ cd.flush_domains);
+#endif
+ i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains,
+ cd.flush_rings);
+ }
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
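+/* The batch start offset and length must both be 8-byte aligned. */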
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int flips;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->base.write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask, ret;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+ }
+
+ return 0;
+}
+
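+/*
+ * Commit the pending domains computed during relocation and mark each
+ * object active on the ring; anything with a write domain is flagged
+ * dirty and queued for a flush.
+ */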
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring, seqno);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->pending_gpu_write = true;
+ list_move_tail(&obj->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(ring->dev, obj);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ obj->base.write_domain);
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 flush_domains;
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires.
+ *
+ * The sampler always gets flushed on i965 (sigh).
+ */
+ flush_domains = 0;
+ if (INTEL_INFO(dev)->gen >= 4)
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+
+ ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL || i915_add_request(dev, file, request, ring)) {
+ i915_gem_next_request_seqno(dev, ring);
+ kfree(request);
+ }
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ u32 exec_start, exec_len;
+ u32 seqno;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->ring[RCS];
+ break;
+ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[BCS];
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring,
+ I915_EXEC_CONSTANTS_MASK << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)(uintptr_t)
+ args->cliprects_ptr,
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+ &objects, eb,
+ exec,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (batch_obj->base.pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+ if (ret)
+ goto err;
+
+ seqno = i915_gem_next_request_seqno(dev, ring);
+ for (i = 0; i < I915_NUM_RINGS-1; i++) {
+ if (seqno < ring->sync_seqno[i]) {
+ /* The GPU cannot handle its semaphore value wrapping,
+ * so every billion or so execbuffers, we need to stall
+ * the GPU in order to reset the counters.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto err;
+
+ BUG_ON(ring->sync_seqno[i]);
+ }
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ kfree(cliprects);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 00000000000..86673e77d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
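+/*
+ * Rewrite the GTT entries for every bound object, typically after
+ * resume when the GTT contents have been lost. Objects are clflushed
+ * first so the reinserted pages see coherent data; machines that need
+ * DMA remapping reuse the object's scatter/gather list.
+ */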
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+ }
+
+ intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ret = intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+
+ return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+ obj->sg_list = NULL;
+ obj->num_sg = 0;
+ }
+
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be..22a32b9932c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->gtt_space == NULL)
- return true;
+ u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
-
- if (IS_GEN3(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ if (obj->gtt_space->size != size)
+ return false;
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
return true;
}
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- if (obj_priv->pin_count) {
- drm_gem_object_unreference_unlocked(obj);
+ if (obj->pin_count) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
- if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj, true);
- else
- i915_gem_release_mmap(obj);
+ i915_gem_release_mmap(obj);
- if (ret != 0) {
- args->tiling_mode = obj_priv->tiling_mode;
- args->stride = obj_priv->stride;
- goto err;
- }
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
}
-err:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
@@ -359,22 +364,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
}
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL)
+ if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- i915_gem_swizzle_page(obj_priv->pages[i]);
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->pages[i]);
+ set_page_dirty(obj->pages[i]);
}
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}
for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ if (page_to_phys(obj->pages[i]) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7..0dadc025b77 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,20 +67,20 @@
void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != 0) {
+ dev_priv->gt_irq_mask &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != mask) {
+ dev_priv->gt_irq_mask |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
@@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
@@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
@@ -156,16 +156,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
*/
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %d\n", pipe);
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(pipe));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags, drmcrtc);
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno = ring->get_seqno(dev, ring);
- ring->irq_gem_seqno = seqno;
+ u32 seqno = ring->get_seqno(ring);
+ ring->irq_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
wake_up_all(&ring->irq_queue);
dev_priv->hangcheck_count = 0;
@@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
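+/*
+ * Gen6 power-management interrupt: step the GPU frequency (RPS) one
+ * notch up or down in response to the up/down threshold interrupts,
+ * clamping the result to the min/max delay limits.
+ */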
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u8 new_delay = dev_priv->cur_delay;
+ u32 pm_iir;
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (!pm_iir)
+ return;
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->min_delay) {
+ new_delay = dev_priv->min_delay;
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+ ((new_delay << 16) & 0x3f0000));
+ } else {
+ /* Make sure we continue to get down interrupts
+ * until we hit the minimum frequency */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+ }
+
+ }
+
+ gen6_set_rps(dev, new_delay);
+ dev_priv->cur_delay = new_delay;
+
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+}
+
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
@@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY)
- notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->bsd_ring);
- if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->blt_ring);
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
i915_handle_rps_change(dev);
}
+ if (IS_GEN6(dev))
+ gen6_pm_irq_handler(dev);
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
done:
I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
return ret;
}
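POSTING_READ(reg), used throughout this patch in place of the older (void)I915_READ(reg) idiom, is a throwaway MMIO read that flushes posted writes to the device before the CPU proceeds. A minimal sketch of the wrapper, assuming the existing I915_READ macro:

	#define POSTING_READ(reg)	((void)I915_READ(reg))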
@@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
- struct drm_gem_object *src)
+ struct drm_i915_gem_object *src)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
- struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;
- if (src == NULL)
- return NULL;
-
- src_priv = to_intel_bo(src);
- if (src_priv->pages == NULL)
+ if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->size / PAGE_SIZE;
+ page_count = src->base.size / PAGE_SIZE;
dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
+ reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
- dst->gtt_offset = src_priv->gtt_offset;
+ dst->gtt_offset = src->gtt_offset;
return dst;
@@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
}
static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+i915_ringbuffer_last_batch(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 head, bbaddr;
- u32 *ring;
+ u32 *val;
/* Locate the current position in the ringbuffer and walk back
* to find the most recently dispatched batch buffer.
*/
- bbaddr = 0;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
+ val = (u32 *)(ring->virtual_start + head);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
if (bbaddr)
- break;
+ return bbaddr;
}
- if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->render_ring.virtual_start
- + dev_priv->render_ring.size);
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
- if (bbaddr)
- break;
- }
+ val = (u32 *)(ring->virtual_start + ring->size);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
+ if (bbaddr)
+ return bbaddr;
}
- return bbaddr;
+ return 0;
+}
+
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+ int count,
+ struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : 0;
+
+ if (++i == count)
+ break;
+
+ err++;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
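		/* fall through: gen3 also reads the eight gen2 fence registers below */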
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ }
}
/**
@@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
- struct drm_gem_object *batchbuffer[2];
+ struct drm_i915_gem_object *batchbuffer[2];
unsigned long flags;
u32 bbaddr;
int count;
@@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_DEBUG_DRIVER("generating error event\n");
- error->seqno =
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (INTEL_INFO(dev)->gen < 4) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- } else {
+ error->error = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+
+ error->bcs_acthd = I915_READ(BCS_ACTHD);
+ error->bcs_ipehr = I915_READ(BCS_IPEHR);
+ error->bcs_ipeir = I915_READ(BCS_IPEIR);
+ error->bcs_instdone = I915_READ(BCS_INSTDONE);
+ error->bcs_seqno = 0;
+ if (dev_priv->ring[BCS].get_seqno)
+ error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+ error->vcs_acthd = I915_READ(VCS_ACTHD);
+ error->vcs_ipehr = I915_READ(VCS_IPEHR);
+ error->vcs_ipeir = I915_READ(VCS_IPEIR);
+ error->vcs_instdone = I915_READ(VCS_INSTDONE);
+ error->vcs_seqno = 0;
+ if (dev_priv->ring[VCS].get_seqno)
+ error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
error->instdone = I915_READ(INSTDONE_I965);
@@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
}
+ i915_gem_record_fences(dev, error);
- bbaddr = i915_ringbuffer_last_batch(dev);
+ bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
/* Grab the current batchbuffer, most likely to have crashed. */
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
count++;
}
/* Scan the other lists too, for completeness, to catch those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev)
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
- dev_priv->render_ring.gem_object);
+ dev_priv->ring[RCS].obj);
- /* Record buffers on the active list. */
+ /* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
- error->active_bo_count = 0;
+ error->pinned_bo = NULL;
- if (count)
+ error->active_bo_count = count;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ count++;
+ error->pinned_bo_count = count - error->active_bo_count;
+
+ if (count) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
GFP_ATOMIC);
-
- if (error->active_bo) {
- int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- error->active_bo[i].size = obj->size;
- error->active_bo[i].name = obj->name;
- error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
- error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
- error->active_bo[i].read_domains = obj->read_domains;
- error->active_bo[i].write_domain = obj->write_domain;
- error->active_bo[i].fence_reg = obj_priv->fence_reg;
- error->active_bo[i].pinned = 0;
- if (obj_priv->pin_count > 0)
- error->active_bo[i].pinned = 1;
- if (obj_priv->user_pin_count > 0)
- error->active_bo[i].pinned = -1;
- error->active_bo[i].tiling = obj_priv->tiling_mode;
- error->active_bo[i].dirty = obj_priv->dirty;
- error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
- if (++i == count)
- break;
- }
- error->active_bo_count = i;
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
}
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_bo_list(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_bo_list(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.pinned_list);
+
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
@@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
+ POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
+ POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
@@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- wake_up_all(&dev_priv->blt_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[BCS].irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
@@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- obj_priv = to_intel_bo(work->pending_flip_obj);
+ obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
pipeb_stats = I915_READ(PIPEBSTAT);
@@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
@@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- notify_ring(dev, &dev_priv->bsd_ring);
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
return dev_priv->counter;
}
@@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- if (dev_priv->trace_irq_seqno == 0)
- render_ring->user_irq_get(dev, render_ring);
-
- dev_priv->trace_irq_seqno = seqno;
+ if (dev_priv->trace_irq_seqno == 0 &&
+ ring->irq_get(ring))
+ dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- render_ring->user_irq_get(dev, render_ring);
- DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- render_ring->user_irq_put(dev, render_ring);
+ ret = -ENODEV;
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ }
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->render_ring.request_list.prev,
- struct drm_i915_gem_request, list);
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+ ring->name,
+ ring->waiting_seqno,
+ ring->get_seqno(ring));
+ wake_up_all(&ring->irq_queue);
+ *err = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ if (IS_GEN6(dev) &&
+ (tmp & RING_WAIT_SEMAPHORE)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
}
/**
@@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
+ bool err = false;
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+ dev_priv->hangcheck_count = 0;
+ if (err)
+ goto repeat;
+ return;
+ }
if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
@@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone1 = I915_READ(INSTDONE1);
}
- /* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
- bool missed_wakeup = false;
-
- dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- wake_up_all(&dev_priv->render_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->bsd_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->blt_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
- wake_up_all(&dev_priv->blt_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (missed_wakeup)
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- return;
- }
-
if (dev_priv->last_acthd == acthd &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- u32 tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- goto out;
- }
+
+ if (kick_ring(&dev_priv->ring[RCS]))
+ goto repeat;
+
+ if (HAS_BSD(dev) &&
+ kick_ring(&dev_priv->ring[VCS]))
+ goto repeat;
+
+ if (HAS_BLT(dev) &&
+ kick_ring(&dev_priv->ring[BCS]))
+ goto repeat;
}
i915_handle_error(dev, true);
@@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
-out:
+repeat:
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
+ POSTING_READ(DEIER);
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
+ POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
- (void) I915_READ(SDEIER);
+ POSTING_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable the kinds of interrupts that are always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+ u32 render_irqs;
u32 hotplug_mask;
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+ dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
- if (IS_GEN6(dev)) {
- render_mask =
- GT_PIPE_NOTIFY |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- }
-
- dev_priv->gt_irq_mask_reg = ~render_mask;
- dev_priv->gt_irq_enable_reg = render_mask;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
if (IS_GEN6(dev)) {
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
- I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
}
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
}
- dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- dev_priv->pch_irq_enable_reg = hotplug_mask;
+ dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
- (void) I915_READ(SDEIER);
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
}
/*
@@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f4342927..8f948a6fbc1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
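A sketch (not from this patch) of how such a domain reset is typically driven: request it, then poll the self-clearing bit; real code would bound the wait with a timeout:

	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
	while (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL)
		cpu_relax();	/* bit self-clears when the reset completes */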
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -158,12 +164,23 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue x
+ * address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
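Following the rules in the comment above, a hedged sketch of loading a single register from the ring, with reg and val as placeholders, using the BEGIN_LP_RING/OUT_RING helpers this patch already uses:

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_NOOP);			/* hw may otherwise drop the load */
		OUT_RING(MI_LOAD_REGISTER_IMM(1));	/* one address/value pair */
		OUT_RING(reg);
		OUT_RING(val);
		ADVANCE_LP_RING();
	}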
#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
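These bits are consumed by the gen6 ring-to-ring synchronisation code; a sketch of the wait side, hedged since the exact flag combination depends on the ring pair:

	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_COMPARE |
			      MI_SEMAPHORE_REGISTER);
	intel_ring_emit(ring, seqno);	/* stall until the mbox value passes seqno */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);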
/*
* 3D instructions used by the kernel
*/
@@ -256,10 +273,6 @@
* Instruction and interrupt control regs
*/
#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +281,13 @@
#define RING_HEAD(base) ((base)+0x34)
#define RING_START(base) ((base)+0x38)
#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +302,17 @@
#define RING_INVALID 0x00000000
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
@@ -305,11 +329,42 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define VCS_INSTDONE 0x1206C
+#define VCS_IPEIR 0x12064
+#define VCS_IPEHR 0x12068
+#define VCS_ACTHD 0x12074
+#define BCS_INSTDONE 0x2206C
+#define BCS_IPEIR 0x22064
+#define BCS_IPEHR 0x22068
+#define BCS_ACTHD 0x22074
+
+#define ERROR_GEN6 0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
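Given the write-enable convention described above, flipping a chicken bit pairs the bit with its enable in the high half, e.g. (a sketch of the usual pattern):

	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);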
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 11)
+#define GFX_MODE 0x02520
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -461,7 +516,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_IMR 0x120a8
-#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+#define GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GEN6_BSD_RNCID 0x12198
@@ -541,6 +596,18 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
/*
* GPIO regs
@@ -900,6 +967,8 @@
*/
#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1119,6 +1188,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
/*
* Logical Context regs
*/
@@ -1168,7 +1241,6 @@
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
/* VGA port control */
@@ -2182,8 +2254,10 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2291,6 +2365,40 @@
#define ILK_FIFO_LINE_SIZE 64
+/* watermark (WM) parameters for Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the register from which all the watermark latency values are read */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
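Expanded, SNB_READ_WM1_LATENCY() is just a shift-and-mask of the MCHBAR-mirrored SSKPD word (bits 13:8), with latency as a placeholder:

	latency = (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> SSKPD_WM1_SHIFT)
			& SSKPD_WM_MASK;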
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2459,10 @@
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
/* Display A control */
#define DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2589,6 +2701,8 @@
#define GTIER 0x4401c
#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
@@ -2600,6 +2714,8 @@
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
+
/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
@@ -2679,6 +2795,7 @@
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
@@ -3063,4 +3180,66 @@
#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_ACK 0x130090
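The pairing suggests the usual handshake: write 1 to FORCEWAKE to keep the GT power well awake while touching GT registers, poll the ACK, then write 0 to release. A sketch, assuming bit 0 carries the ack and leaving out the timeout real code needs:

	I915_WRITE(FORCEWAKE, 1);
	while ((I915_READ(FORCEWAKE_ACK) & 1) == 0)
		cpu_relax();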
+
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
+#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_DATA 0x138128
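The mailbox protocol implied by these defines: stage the payload in GEN6_PCODE_DATA, kick GEN6_PCODE_MAILBOX with READY plus a command, then wait for the hardware to clear READY. A sketch for the one command defined above, with the poll left unbounded for brevity:

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	while (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		cpu_relax();	/* READY clears once pcode consumes the command */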
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d25da5..410772466fa 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
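		/* fall through: gen3 also saves the eight gen2 fence registers below */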
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
return;
}
@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ }
+
+
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ /* XXX disabling the clock gating breaks suspend on gm45
+ intel_disable_clock_gating(dev);
+ */
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- }
-
return 0;
}
@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- break;
- }
-
i915_restore_display(dev);
/* Interrupt state */
@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc1..7f0fc3ed61a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>
#include <drm/drmP.h>
+#include "i915_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
TRACE_EVENT(i915_gem_object_create,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->size = obj->size;
+ __entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
- TP_ARGS(obj, gtt_offset),
+ TP_ARGS(obj, gtt_offset, mappable),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
+ __field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
+ __entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x",
- __entry->obj, __entry->gtt_offset)
+ TP_printk("obj=%p, gtt_offset=%08x%s",
+ __entry->obj, __entry->gtt_offset,
+ __entry->mappable ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_object_change_domain,
- TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
__entry->read_domains, __entry->write_domain)
);
-TRACE_EVENT(i915_gem_object_get_fence,
-
- TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
- TP_ARGS(obj, fence, tiling_mode),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(int, fence)
- __field(int, tiling_mode)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- __entry->fence = fence;
- __entry->tiling_mode = tiling_mode;
- ),
-
- TP_printk("obj=%p, fence=%d, tiling=%d",
- __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
DECLARE_EVENT_CLASS(i915_gem_object,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);
TRACE_EVENT(i915_flip_request,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
);
TRACE_EVENT(i915_flip_complete,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
+TRACE_EVENT(i915_reg_rw,
+ TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+ TP_ARGS(cmd, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(int, cmd)
+ __field(uint32_t, reg)
+ __field(uint64_t, val)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->reg = reg;
+ __entry->val = (uint64_t)val;
+ __entry->len = len;
+ ),
+
+ TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+ __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
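A sketch of how a register accessor might emit this event, assuming the single-character 'R'/'W' cmd convention implied by the %c format; example_read32 is an illustrative name, not a function from this patch:

	static inline u32 example_read32(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val = readl(dev_priv->regs + reg);	/* raw MMIO read */
		trace_i915_reg_rw('R', reg, val, sizeof(val));
		return val;
	}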
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca523288ac..0abe79fb638 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- int refclk = 120;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
- refclk = 100;
-
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc);
+ limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
{
- const intel_limit_t *limit = intel_limit (crtc);
- struct drm_device *dev = crtc->dev;
-
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
- this_err = abs(clock.dot - target) ;
+
+ this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch &&
- obj_priv->fence_reg == dev_priv->cfb_fence &&
+ obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ }
+
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n");
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
+ obj = intel_fb->obj;
- if (intel_fb->obj->size > dev_priv->cfb_size) {
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj_priv->tiling_mode != I915_TILING_X) {
+ if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
@@ -1433,14 +1437,13 @@ out_disable:
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined)
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
return ret;
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence(obj, pipelined, false);
if (ret)
goto err_unpin;
}
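
The pipelined argument above replaces the old bool: callers now pass the ring
on which the fence change may be queued, or NULL to fence synchronously. A
minimal caller sketch (illustrative only, assuming the signature in this
patch; example_pin_scanout is a hypothetical helper):

    /* Illustrative only -- not part of the patch. */
    static int example_pin_scanout(struct drm_device *dev,
                                   struct drm_i915_gem_object *obj,
                                   bool async_flip)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            /* NULL => fence synchronously (modeset path); a ring =>
             * the fence update may be pipelined on it (flip path). */
            struct intel_ring_buffer *pipelined =
                    async_flip ? LP_RING(dev_priv) : NULL;

            return intel_pin_and_fence_fb_obj(dev, obj, pipelined);
    }
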
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj_priv->gtt_offset;
+ Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(crtc->fb)->obj,
- false);
+ NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*/
- ret = i915_gem_object_flush_gpu(obj_priv, false);
+ ret = i915_gem_object_flush_gpu(obj, false);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ }
mutex_unlock(&dev->struct_mutex);
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev,
static void intel_clear_scanline_wait(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
u32 tmp;
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
- tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- }
+ ring = LP_RING(dev_priv);
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT)
+ I915_WRITE_CTL(ring, tmp);
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
if (crtc->fb == NULL)
return;
- obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2850,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -3383,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
static bool ironlake_compute_wm0(struct drm_device *dev,
int pipe,
+ const struct intel_watermark_params *display,
+ int display_latency,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency,
int *plane_wm,
int *cursor_wm)
{
@@ -3400,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
- entries = DIV_ROUND_UP(entries,
- ironlake_display_wm_info.cacheline_size);
- *plane_wm = entries + ironlake_display_wm_info.guard_size;
- if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
- *plane_wm = ironlake_display_wm_info.max_wm;
+ entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
- entries = DIV_ROUND_UP(entries,
- ironlake_cursor_wm_info.cacheline_size);
- *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
- if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
- *cursor_wm = ironlake_cursor_wm_info.max_wm;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
return true;
}
@@ -3430,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev,
int tmp;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev,
enabled++;
}
- if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3453,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev,
* display plane is used.
*/
tmp = 0;
- if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
+ if (enabled == 1) {
unsigned long line_time_us;
int small, large, plane_fbc;
int sr_clock, entries;
@@ -3505,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev,
/* XXX setup WM2 and WM3 */
}
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ *
+ * Also returns false if all of the watermark values are 0, which
+ * sandybridge_compute_srwm sets to indicate that the latency is zero.
+ */
+static bool sandybridge_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* fbc has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > SNB_DISPLAY_MAX_SRWM) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values for WM[1-3].
+ */
+static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
+ int hdisplay, int htotal, int pixel_size,
+ int clock, int latency_ns, int *fbc_wm,
+ int *display_wm, int *cursor_wm)
+{
+
+ unsigned long line_time_us;
+ int small, large;
+ int entries;
+ int line_count, line_size;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large),
+ sandybridge_display_srwm_info.cacheline_size);
+ *display_wm = entries + sandybridge_display_srwm_info.guard_size;
+
+ /*
+ * Spec said:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ sandybridge_cursor_srwm_info.cacheline_size);
+ *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
+
+ return sandybridge_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm);
+}
+
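
A worked instance of the computation above, with assumed inputs (and assuming
SNB_FIFO_LINE_SIZE == 64 and a guard_size of 2, per the watermark tables
earlier in this patch):

    /*
     * hdisplay = 1920, htotal = 2200, clock = 148500 (kHz),
     * pixel_size = 4, latency_ns = 500:
     *
     *   line_time_us = (2200 * 1000) / 148500             = 14
     *   line_count   = (500 / 14 + 1000) / 1000           = 1
     *   line_size    = 1920 * 4                           = 7680
     *   small        = ((148500 * 4 / 1000) * 500) / 1000 = 297
     *   large        = 1 * 7680                           = 7680
     *   *display_wm  = DIV_ROUND_UP(297, 64) + 2          = 7
     *   *fbc_wm      = DIV_ROUND_UP(7 * 64, 7680) + 2     = 3
     *   *cursor_wm   = DIV_ROUND_UP(1 * 4 * 64, 64) + 2   = 6
     */
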
+static void sandybridge_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int hdisplay, int htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY();
+ int fbc_wm, plane_wm, cursor_wm, enabled;
+ int clock;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ if (ironlake_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
+ * and disabled in descending order.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (enabled != 1)
+ return;
+
+ clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* WM1 */
+ if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM1_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!sandybridge_compute_srwm(dev, 2,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM2_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!sandybridge_compute_srwm(dev, 3,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM3_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
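
Each WMn_LP_ILK write above packs the enable bit, latency, fbc, plane and
cursor fields into a single register. A sketch of the packing (illustrative
helper, not part of the patch; the shift and enable macros are the ones used
above, and the cursor field sits in the low bits so it needs no shift):

    /* Illustrative only -- not part of the patch. */
    static u32 example_pack_wm1(u32 latency, u32 fbc_wm,
                                u32 plane_wm, u32 cursor_wm)
    {
            return WM1_LP_SR_EN |
                   (latency  << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm   << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm;
    }
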
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3660,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc);
+ limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3857,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ /* Enable autotuning of the PLL clock (if permissible) */
+ if (HAS_PCH_SPLIT(dev)) {
+ int factor = 21;
+
+ if (is_lvds) {
+ if ((dev_priv->lvds_use_ssc &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock.m1 < factor * clock.n)
+ fp |= FP_CB_TUNE;
+ }
+
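
An illustrative evaluation of the test above: for LVDS with a 100MHz SSC
reference, factor = 25; with assumed dividers clock.n = 2 and clock.m1 = 18,
18 < 25 * 2 holds, so FP_CB_TUNE is set and the PLL's coarse-bandwidth
autotuning is enabled (the divider values here are hypothetical).
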
dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
@@ -4071,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -4089,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
- /* write it again -- the BIOS does, after all */
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
I915_WRITE(dpll_reg, dpll);
}
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(dpll_reg);
- udelay(150);
}
intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
+ struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
@@ -4349,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
- bo = NULL;
+ obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
@@ -4360,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- bo = drm_gem_object_lookup(dev, file_priv, handle);
- if (!bo)
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (!obj)
return -ENOENT;
- obj_priv = to_intel_bo(bo);
-
- if (bo->size < width * height * 4) {
+ if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
@@ -4375,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
- ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
- addr = obj_priv->gtt_offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, bo,
+ ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj_priv->phys_obj->handle->busaddr;
+ addr = obj->phys_obj->handle->busaddr;
}
if (IS_GEN2(dev))
@@ -4406,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
+ if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
@@ -4424,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(bo);
+ i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -4739,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- dev_priv->busy = false;
+ if (!list_empty(&dev_priv->mm.active_list)) {
+ /* Still processing requests, so just re-arm the timer. */
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ return;
+ }
+ dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
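
This timer and the per-CRTC timer in the next hunk follow the same re-arm
idiom: check for outstanding work, then either push the deadline back or
signal idleness. A generic sketch of the idiom (hypothetical names, not part
of the patch):

    static void example_idle_timer(unsigned long arg)
    {
            struct example_dev *ed = (struct example_dev *)arg;

            if (example_still_busy(ed)) {
                    /* Not idle yet -- try again one timeout from now. */
                    mod_timer(&ed->idle_timer, jiffies +
                              msecs_to_jiffies(EXAMPLE_IDLE_TIMEOUT));
                    return;
            }

            ed->busy = false;
            queue_work(ed->wq, &ed->idle_work);
    }
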
@@ -4751,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ struct intel_framebuffer *intel_fb;
- intel_crtc->busy = false;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb && intel_fb->obj->active) {
+ /* The framebuffer is still being accessed by the GPU. */
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ return;
+ }
+ intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4888,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
@@ -4969,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
- drm_gem_object_unreference(work->pending_flip_obj);
- drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4981,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
- struct timeval now;
+ struct timeval tnow, tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
+ do_gettimeofday(&tnow);
+
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
@@ -4998,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
}
intel_crtc->unpin_work = NULL;
- drm_vblank_put(dev, intel_crtc->pipe);
if (work->event) {
e = work->event;
- do_gettimeofday(&now);
- e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+ /* Called before vblank count and timestamps have
+ * been updated for the vblank interval of flip
+ * completion? Need to increment vblank count and
+ * add one videorefresh duration to returned timestamp
+ * to account for this. We assume this happened if we
+ * get called over 0.9 frame durations after the last
+ * timestamped vblank.
+ *
+ * This calculation cannot be used with vrefresh rates
+ * below 5Hz (10Hz to be on the safe side) without
+ * promoting to 64-bit integers.
+ */
+ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+ 9 * crtc->framedur_ns) {
+ e->event.sequence++;
+ tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+ crtc->framedur_ns);
+ }
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
+ drm_vblank_put(dev, intel_crtc->pipe);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->old_fb_obj);
+ obj = work->old_fb_obj;
+
atomic_clear_mask(1 << intel_crtc->plane,
- &obj_priv->pending_flip.counter);
- if (atomic_read(&obj_priv->pending_flip) == 0)
+ &obj->pending_flip.counter);
+ if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
+
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
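
A numeric check of the 0.9-frame heuristic above, with assumed values: at
60Hz, crtc->framedur_ns is roughly 16666667. If the completion handler runs
15.2ms after the last timestamped vblank, then 10 * 15200000 = 152000000
exceeds 9 * 16666667 = 150000003, so the sequence is incremented and one
frame duration is added to the returned timestamp; at 10ms it would not be.
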
@@ -5063,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
@@ -5098,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto cleanup_work;
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(work->old_fb_obj);
- drm_gem_object_reference(obj);
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
crtc->fb = fb;
@@ -5112,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane,
- &to_intel_bo(work->old_fb_obj)->pending_flip);
-
- work->pending_flip_obj = obj;
- obj_priv = to_intel_bo(obj);
-
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ goto cleanup_objs;
+
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
@@ -5137,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ADVANCE_LP_RING();
}
+ work->pending_flip_obj = obj;
+
work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
- BEGIN_LP_RING(4);
- switch(INTEL_INFO(dev)->gen) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto cleanup_objs;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ switch (INTEL_INFO(dev)->gen) {
case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5156,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5169,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -5183,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj_priv->tiling_mode);
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_objs:
- drm_gem_object_unreference(work->old_fb_obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
cleanup_work:
mutex_unlock(&dev->struct_mutex);
@@ -5338,7 +5642,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5505,19 +5809,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(intel_fb->obj);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
+ struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_gem_object *object = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb->obj;
- return drm_gem_handle_create(file_priv, object, handle);
+ return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5832,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
if (mode_cmd->pitch & 63)
@@ -5565,11 +5868,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj)
return ERR_PTR(-ENOENT);
@@ -5577,10 +5880,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(dev, intel_fb,
- mode_cmd, obj);
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
@@ -5593,10 +5895,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_i915_gem_object *ctx;
int ret;
ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +5908,7 @@ intel_alloc_context_page(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -5624,7 +5926,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5736,6 +6038,25 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
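
gen6_set_rps places the software frequency request in the top bits of
GEN6_RPNSWREQ: for example, val = 10 gives swreq = 10 << 25 = 0x14000000,
the same encoding as the GEN6_FREQUENCY(10) request programmed later in
gen6_enable_rps.
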
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5822,7 +6143,96 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ __gen6_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_RC6p_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_MAX |
+ GEN6_RP_DOWN_BUSY_MIN);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIMR, 0);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ __gen6_force_wake_put(dev_priv);
+}
+
+void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5872,9 +6282,9 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5896,7 +6306,49 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- return;
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ if (IS_GEN5(dev)) {
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+ }
+
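
The _3D_CHICKEN2 write above uses the masked-register idiom: the upper 16
bits act as per-bit write enables, so "bit << 16 | bit" sets a bit while
"bit << 16" alone clears it (general background on these registers, not a
statement from the patch itself).
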
+ if (IS_GEN6(dev)) {
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * bits 21 and 22 of 0x42000
+ * bits 21 and 22 of 0x42004
+ * bits 5 and 7 of 0x42020
+ * bit 14 of 0x70180
+ * bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ I915_WRITE(DSPACNTR,
+ I915_READ(DSPACNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ I915_WRITE(DSPBCNTR,
+ I915_READ(DSPBCNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ }
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5939,20 +6391,18 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+ if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
+ OUT_RING(obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
@@ -5962,29 +6412,45 @@ void intel_init_clock_gating(struct drm_device *dev)
"Disable RC6\n");
}
- if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_i915_gem_object *obj_priv = NULL;
-
+ if (IS_GEN4(dev) && IS_MOBILE(dev)) {
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) {
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- } else {
- struct drm_gem_object *pwrctx;
-
- pwrctx = intel_alloc_context_page(dev);
- if (pwrctx) {
- dev_priv->pwrctx = pwrctx;
- obj_priv = to_intel_bo(pwrctx);
- }
- }
-
- if (obj_priv) {
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+ I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
}
}
}
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+ I915_WRITE(CCID, 0);
+ POSTING_READ(CCID);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -5997,7 +6463,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6512,14 @@ static void intel_init_display(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -6211,7 +6685,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
@@ -6221,6 +6695,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -6252,28 +6729,12 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
- if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
- I915_READ(PWRCTXA);
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(dev_priv->pwrctx);
- }
-
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ intel_disable_clock_gating(dev);
mutex_unlock(&dev->struct_mutex);
@@ -6325,3 +6786,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[2];
+
+ struct intel_pipe_error_state {
+ u32 conf;
+ u32 source;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } pipe[2];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int i;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ for (i = 0; i < 2; i++) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
+ }
+
+ return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, "Pipe [%d]:\n", i);
+ seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ seq_printf(m, "Plane [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ seq_printf(m, "Cursor [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ seq_printf(m, " POS: %08x\n", error->cursor[i].position);
+ seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+}
+#endif
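
A sketch of how the capture/print pair above might be wired together (the
patch adds only the two functions; the caller below is hypothetical):

    /* Illustrative only -- not part of the patch. */
    static struct intel_display_error_state *example_state;

    static void example_on_gpu_hang(struct drm_device *dev)
    {
            /* Capture uses GFP_ATOMIC, so this is safe from the
             * error-handling path. */
            example_state = intel_display_capture_error_state(dev);
    }

    static int example_debugfs_show(struct seq_file *m, void *unused)
    {
            struct drm_device *dev = m->private;

            if (example_state)
                    intel_display_print_error_state(m, dev, example_state);
            return 0;
    }
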
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417cffe9..1dc60408d5b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- intel_wait_for_vblank(intel_dp->base.base.dev,
- intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c6125bb1..d782ad9fd6d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
- struct drm_gem_object *cursor_bo;
+ struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
bool enable_stall_check;
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
@@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined);
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dddc28..701e830d001 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,10 +65,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
- struct drm_gem_object *fbo = NULL;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ int size, ret;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = i915_gem_alloc_object(dev, size);
- if (!fbo) {
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
- obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
- size);
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
@@ -153,13 +150,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
- info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -168,7 +160,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
- obj_priv->gtt_offset, fbo);
+ obj->gtt_offset, obj);
mutex_unlock(&dev->struct_mutex);
@@ -176,9 +168,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0;
out_unpin:
- i915_gem_object_unpin(fbo);
+ i915_gem_object_unpin(obj);
out_unref:
- drm_gem_object_unreference(fbo);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
@@ -225,7 +217,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7ee..58040f68ed7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = I915_READ_NOTRACE(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -96,9 +97,9 @@ static int get_clock(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | clock_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
}
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | data_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf386f..aa2307080be 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -304,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
- pfit_control |= PFIT_SCALING_PILLAR;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
- pfit_control |= PFIT_SCALING_LETTER;
- else
- pfit_control |= PFIT_SCALING_AUTO;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -358,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
break;
default:
@@ -914,6 +917,8 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (INTEL_INFO(dev)->gen >= 5)
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1024,18 @@ bool intel_lvds_init(struct drm_device *dev)
out:
if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
- /* make sure PWM is enabled */
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+ /* make sure PWM is enabled and locked to the LVDS pipe */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
- pwm |= (PWM_ENABLE | PWM_PIPE_B);
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+ if (pipe == 0 && (pwm & PWM_PIPE_B))
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+ if (pipe)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
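
The reworked mode_fixup hunk above only raises PFIT_ENABLE when scaling is actually required, and it compares aspect ratios by cross-multiplying so no division is needed. A small standalone model of that decision; the function and enum names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

enum pfit { PFIT_NONE, PFIT_PILLAR, PFIT_LETTER, PFIT_AUTO };

static enum pfit pick_pfit(uint32_t mode_w, uint32_t mode_h,
			   uint32_t adj_w, uint32_t adj_h)
{
	uint32_t scaled_w = adj_w * mode_h;	/* panel aspect, common scale */
	uint32_t scaled_h = mode_w * adj_h;	/* content aspect, common scale */

	if (scaled_w > scaled_h)
		return PFIT_PILLAR;	/* panel wider: bars left and right */
	if (scaled_w < scaled_h)
		return PFIT_LETTER;	/* panel taller: bars top and bottom */
	if (adj_w != mode_w)
		return PFIT_AUTO;	/* same aspect, different size */
	return PFIT_NONE;		/* 1:1, leave the fitter disabled */
}

int main(void)
{
	/* 1024x768 content on a 1280x800 panel -> pillarboxed */
	printf("%d\n", pick_pfit(1024, 768, 1280, 800));	/* 1 = PFIT_PILLAR */
	return 0;
}
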
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867ae..f295a7aaadf 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- if (IS_MOBILE(dev)) {
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_MOBILE(dev))
intel_enable_asle(dev);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock,
- irqflags);
- }
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f4..3fbb98b948d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_do_wait_request(dev,
overlay->last_flip_req, true,
- &dev_priv->render_ring);
+ LP_RING(dev_priv));
if (ret)
return ret;
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret) {
+ kfree(request);
+ goto out;
+ }
+
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
+ int ret;
BUG_ON(!overlay->active);
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ overlay->last_flip_req = request->seqno;
return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
}
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
bool interruptible)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
+ int ret;
BUG_ON(!overlay->active);
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
+ interruptible, LP_RING(dev_priv));
if (ret)
return ret;
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
}
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
+ struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
goto out_unpin;
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
if (!overlay->active) {
regs = intel_overlay_map_regs(overlay);
if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
- regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = to_intel_bo(new_bo);
+ overlay->vid_bo = new_bo;
return 0;
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
- struct drm_gem_object *new_bo)
+ struct drm_i915_gem_object *new_bo)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
break;
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->size ||
- rec->offset_V + tmp > new_bo->size)
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
return -EINVAL;
break;
}
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- struct drm_gem_object *new_bo;
+ struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = intel_overlay_recover_from_interrupt(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference_unlocked(new_bo);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_gem_object *reg_bo;
+ struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
int ret;
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = to_intel_bo(reg_bo);
+ overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
- drm_gem_object_unreference(reg_bo);
+ drm_gem_object_unreference(&reg_bo->base);
out_free:
kfree(overlay);
return;
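
One pattern recurs throughout the overlay conversion above: every ring operation can now fail, so the preallocated request must be freed on each early exit, and its seqno is recorded only once the request has actually been queued. A compilable sketch of that shape; ring_begin() and add_request() are stand-ins for the i915 calls:

#include <errno.h>
#include <stdlib.h>

struct request { unsigned int seqno; };

extern int ring_begin(int dwords);		/* stand-in for BEGIN_LP_RING() */
extern int add_request(struct request *rq);	/* stand-in; sets rq->seqno on success */

static int queue_flip(struct request *rq, unsigned int *last_flip_req)
{
	int ret;

	ret = ring_begin(2);
	if (ret) {
		free(rq);	/* mirror the kfree(request) on failure */
		return ret;
	}
	/* ... emit the flip commands here ... */
	ret = add_request(rq);
	if (ret) {
		free(rq);
		return ret;
	}
	*last_flip_req = rq->seqno;	/* only record a seqno that exists */
	return 0;
}
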
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f38527..7350ec2515c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_PCH_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_CTL,
+ dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ }
+
+ return val;
+}
+
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
+ max = i915_read_blc_pwm_ctl(dev_priv);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ return 1;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
- max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ max >>= 16;
} else {
- max = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
max >>= 17;
} else {
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- DRM_ERROR("fixme: max PWM is zero.\n");
- max = 1;
- }
-
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
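
With the save/restore and the zero check hoisted out above, what remains is the arithmetic: the upper half of the PWM control register holds the modulation period, which is also the maximum duty value, with Pineview shifting one bit further. A simplified, runnable model of that derivation, with the device checks reduced to plain flags:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of intel_panel_get_max_backlight() above; it omits
 * the gen<4 low-bit masking and treats combination mode as a flag. */
static uint32_t max_backlight(uint32_t ctl, int is_pineview, int combination_mode)
{
	uint32_t max = ctl >> (is_pineview ? 17 : 16);

	if (max == 0)
		return 1;		/* the "fixme: max PWM is zero" fallback */
	if (combination_mode)
		max *= 0xff;		/* combination mode scales the range */
	return max;
}

int main(void)
{
	printf("0x%x\n", max_backlight(0x12340000u, 0, 0));	/* 0x1234 */
	return 0;
}
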
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e82..56bc95c056d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
}
static void
-render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -109,49 +109,50 @@ render_ring_flush(struct drm_device *dev,
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, cmd);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
}
-static void ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
u32 head;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
- ring->write_tail(dev, ring, 0);
+ ring->write_tail(ring, 0);
/* Initialize the ring. */
- I915_WRITE_START(ring, obj_priv->gtt_offset);
+ I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +179,13 @@ static int init_ring_common(struct drm_device *dev,
}
I915_WRITE_CTL(ring,
- ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
- if (head != 0) {
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+ I915_READ_START(ring) != obj->gtt_offset ||
+ (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -194,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
return -EIO;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
else {
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
if (ring->space < 0)
ring->space += ring->size;
}
+
return 0;
}
-static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer-grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret = init_ring_common(dev, ring);
- int mode;
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = kmap(obj->pages[0]);
+ if (pc->cpu_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ kfree(pc);
+ return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+ kunmap(obj->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ kfree(pc);
+ ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_GEN5(dev)) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int id;
+
+ /*
+ * cs -> 1 = vcs, 0 = bcs,
+ * vcs -> 1 = bcs, 0 = cs,
+ * bcs -> 1 = cs, 0 = vcs.
+ */
+ id = ring - dev_priv->ring;
+ id += 2 - i;
+ id %= 3;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring,
+ RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ seqno = i915_gem_get_seqno(ring->dev);
+ update_semaphore(ring, 0, seqno);
+ update_semaphore(ring, 1, seqno);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ intel_ring_sync_index(ring, to) << 17 |
+ MI_SEMAPHORE_COMPARE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL | 2); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
} while (0)
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
-
- if (IS_GEN6(dev)) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_PIPE_CONTROL | 3);
- OUT_RING(PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to work around the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ int ret;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
- return seqno;
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
}
static u32
-render_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
-static void
-render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_enable_graphics_irq(dev_priv,
+ GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ return true;
}
static void
-render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_device *dev = ring->dev;
+
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_disable_graphics_irq(dev_priv,
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_GEN6(dev)) {
- I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
- } else {
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
- }
-
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = IS_GEN6(ring->dev) ?
+ RING_HWS_PGA_GEN6(ring->mmio_base) :
+ RING_HWS_PGA(ring->mmio_base);
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
}
static void
-bsd_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
-}
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
-static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return init_ring_common(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
-static u32
-ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- seqno = i915_gem_get_seqno(dev);
+ seqno = i915_gem_get_seqno(ring->dev);
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
- return seqno;
+ *result = seqno;
+ return 0;
}
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ return true;
}
+
static void
-bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
}
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- uint32_t exec_start;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
- (2 << 6) | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
return 0;
}
static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ int ret;
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
- count = nbox ? nbox : 1;
+ if (IS_I830(dev) || IS_845G(dev)) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
+ } else {
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
- intel_ring_emit(dev, ring,
- exec_start | MI_BATCH_NON_SECURE);
- intel_ring_emit(dev, ring, exec_start + exec_len - 4);
- intel_ring_emit(dev, ring, 0);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
} else {
- intel_ring_begin(dev, ring, 2);
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | (2 << 6)
- | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- } else {
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
- | (2 << 6));
- intel_ring_emit(dev, ring, exec_start |
- MI_BATCH_NON_SECURE);
- }
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
}
- intel_ring_advance(dev, ring);
}
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- /* XXX breadcrumb */
+ intel_ring_advance(ring);
return 0;
}
-static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
if (obj == NULL)
return;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
+ kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
-static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
ret = -ENOMEM;
goto err;
}
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj_priv->gtt_offset;
- ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
@@ -557,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -566,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
err:
return ret;
}
@@ -574,9 +740,7 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
ring->dev = dev;
@@ -585,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->gpu_write_list);
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(dev, ring);
+ ret = init_status_page(ring);
if (ret)
return ret;
}
@@ -597,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws;
}
- ring->gem_object = obj;
+ ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;
- obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@@ -618,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
}
ring->virtual_start = ring->map.handle;
- ret = ring->init(dev, ring);
+ ret = ring->init(ring);
if (ret)
goto err_unmap;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- }
- return ret;
+ return 0;
err_unmap:
drm_core_ioremapfree(&ring->map, dev);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
err_hws:
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
return ret;
}
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
- if (ring->gem_object == NULL)
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
return;
- drm_core_ioremapfree(&ring->map, dev);
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s: %d\n", ring->name, ret);
+ I915_WRITE_CTL(ring, 0);
- i915_gem_object_unpin(ring->gem_object);
- drm_gem_object_unreference(ring->gem_object);
- ring->gem_object = NULL;
+ drm_core_ioremapfree(&ring->map, ring->dev);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
rem = ring->size - ring->tail;
if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(dev, ring, rem);
+ int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}
@@ -689,11 +849,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
return 0;
}
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
trace_i915_ring_wait_begin (dev);
@@ -711,7 +871,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
if (ring->space < 0)
ring->space += ring->size;
if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(dev);
return 0;
}
@@ -722,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
msleep(1);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EAGAIN;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
- if (unlikely(ring->tail + n > ring->size))
- intel_wrap_ring_buffer(dev, ring);
- if (unlikely(ring->space < n))
- intel_wait_ring_buffer(dev, ring, n);
+ int ret;
+
+ if (unlikely(ring->tail + n > ring->size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = intel_wait_ring_buffer(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
ring->space -= n;
+ return 0;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->write_tail(dev, ring, ring->tail);
+ ring->write_tail(ring, ring->tail);
}
static const struct intel_ring_buffer render_ring = {
@@ -756,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_seqno = render_ring_get_seqno,
- .user_irq_get = render_ring_get_user_irq,
- .user_irq_put = render_ring_put_user_irq,
- .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = render_ring_get_irq,
+ .irq_put = render_ring_put_irq,
+ .dispatch_execbuffer = render_ring_dispatch_execbuffer,
+ .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
@@ -769,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
.id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = bsd_ring_get_irq,
+ .irq_put = bsd_ring_put_irq,
+ .dispatch_execbuffer = ring_dispatch_execbuffer,
};
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
-static void gen6_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
+
+ if (intel_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
- uint32_t exec_start;
+ int ret;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = RING_BSD,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};
/* Blitter support (SandyBridge+) */
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}
+
static void
-blt_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}
@@ -883,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
return ring->private;
}
-static int blt_ring_init(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
{
- if (NEED_BLT_WORKAROUND(dev)) {
+ if (NEED_BLT_WORKAROUND(ring->dev)) {
struct drm_i915_gem_object *obj;
- u32 __iomem *ptr;
+ u32 *ptr;
int ret;
- obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;
- ret = i915_gem_object_pin(&obj->base, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
ptr = kmap(obj->pages[0]);
- iowrite32(MI_BATCH_BUFFER_END, ptr);
- iowrite32(MI_NOOP, ptr+1);
+ *ptr++ = MI_BATCH_BUFFER_END;
+ *ptr++ = MI_NOOP;
kunmap(obj->pages[0]);
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
- i915_gem_object_unpin(&obj->base);
+ i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
@@ -916,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
ring->private = obj;
}
- return init_ring_common(dev, ring);
+ return init_ring_common(ring);
}
-static void blt_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
if (ring->private) {
- intel_ring_begin(dev, ring, num_dwords+2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ int ret = intel_ring_begin(ring, num_dwords+2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+ return 0;
} else
- intel_ring_begin(dev, ring, 4);
+ return intel_ring_begin(ring, 4);
}
-static void blt_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
-}
-
-static u32
-blt_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
-{
- u32 seqno = i915_gem_get_seqno(dev);
-
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
- return seqno;
+ if (blt_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
- .add_request = blt_ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = blt_ring_get_user_irq,
- .user_irq_put = blt_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- dev_priv->render_ring = render_ring;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
}
- return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
if (IS_GEN6(dev))
- dev_priv->bsd_ring = gen6_bsd_ring;
+ *ring = gen6_bsd_ring;
else
- dev_priv->bsd_ring = bsd_ring;
+ *ring = bsd_ring;
- return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- dev_priv->blt_ring = gen6_blt_ring;
+ *ring = gen6_blt_ring;
- return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+ return intel_init_ring_buffer(dev, ring);
}
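
Taken together, the ringbuffer rework drops the redundant dev argument from every vfunc and makes space reservation fallible instead of silently best-effort. The begin/emit/advance contract is small enough to model in plain C; the sketch below uses illustrative names and simplified dword accounting:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 1024

struct ring {
	uint32_t buf[RING_DWORDS];
	unsigned int tail;	/* next dword slot to write */
	unsigned int space;	/* free dwords; the driver tracks bytes */
};

static int ring_begin(struct ring *r, unsigned int dwords)
{
	if (r->space < dwords)
		return -EBUSY;	/* the driver would wait, then give up */
	r->space -= dwords;	/* reserve up front, as intel_ring_begin() does */
	return 0;
}

static void ring_emit(struct ring *r, uint32_t data)
{
	r->buf[r->tail] = data;
	r->tail = (r->tail + 1) % RING_DWORDS;
}

static void ring_advance(struct ring *r)
{
	/* here the driver masks the tail and writes it to the hw register */
	(void)r;
}

static int emit_flush(struct ring *r, uint32_t cmd)
{
	int ret = ring_begin(r, 2);
	if (ret)
		return ret;	/* propagate instead of emitting blindly */
	ring_emit(r, cmd);
	ring_emit(r, 0);	/* NOOP padding keeps the tail qword-aligned */
	ring_advance(r);
	return 0;
}

int main(void)
{
	struct ring r = { .space = RING_DWORDS };
	printf("%d\n", emit_flush(&r, 0x1));	/* prints 0 */
	return 0;
}
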
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1efee..8e2e357ad6e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,37 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+enum {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
+};
+
struct intel_hw_status_page {
- void *page_addr;
+ u32 __iomem *page_addr;
unsigned int gfx_addr;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-struct drm_i915_gem_execbuffer2;
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@@ -25,45 +40,36 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- unsigned long size;
void *virtual_start;
struct drm_device *dev;
- struct drm_gem_object *gem_object;
+ struct drm_i915_gem_object *obj;
u32 actual_head;
u32 head;
u32 tail;
int space;
+ int size;
struct intel_hw_status_page status_page;
- u32 irq_gem_seqno; /* last seq seem at irq time */
- u32 waiting_gem_seqno;
- int user_irq_refcount;
- void (*user_irq_get)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*user_irq_put)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 irq_seqno; /* last seq seen at irq time */
+ u32 waiting_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ atomic_t irq_refcount;
+ bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
- int (*init)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ int (*init)(struct intel_ring_buffer *ring);
- void (*write_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
- void (*flush)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains);
- u32 (*add_request)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains);
- u32 (*get_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- int (*dispatch_gem_execbuffer)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset);
+ void (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring,
+ u32 *seqno);
+ u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
/**
@@ -96,7 +102,7 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- bool outstanding_lazy_request;
+ u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
drm_local_map_t map;
@@ -105,44 +111,54 @@ struct intel_ring_buffer {
};
static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs  -> 0 = vcs, 1 = bcs,
+ * vcs -> 0 = bcs, 1 = cs,
+ * bcs -> 0 = cs,  1 = vcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
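+/*
+ * A quick sanity check of the mapping above (a sketch; it assumes the
+ * rings sit contiguously in an array ordered cs, vcs, bcs, which is
+ * what the pointer subtraction relies on):
+ *
+ *   ring = vcs, other = bcs: (2 - 1) - 1 =  0          -> idx 0 (bcs)
+ *   ring = vcs, other = cs:  (0 - 1) - 1 = -2, + 3 = 1 -> idx 1 (cs)
+ *   ring = bcs, other = vcs: (1 - 2) - 1 = -2, + 3 = 1 -> idx 1 (vcs)
+ */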
+
+static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
- int reg)
+ int reg)
{
- u32 *regs = ring->status_page.page_addr;
- return regs[reg];
+ return ioread32(ring->status_page.page_addr + reg);
}
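+/*
+ * status_page.page_addr is a u32 __iomem * now, so "+ reg" still
+ * advances in 4-byte steps and ioread32() keeps the access
+ * annotation-clean. A typical read looks like (sketch;
+ * I915_GEM_HWS_INDEX is assumed here purely for illustration):
+ *
+ *   u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ */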
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
+ iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
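+/*
+ * A minimal emit sequence under the reworked API (a sketch; error
+ * handling trimmed, MI_NOOP used only as a stand-in command):
+ *
+ *   ret = intel_ring_begin(ring, 2);
+ *   if (ret == 0) {
+ *           intel_ring_emit(ring, MI_NOOP);
+ *           intel_ring_emit(ring, MI_NOOP);
+ *           intel_ring_advance(ring);
+ *   }
+ *
+ * intel_ring_begin() is __must_check now, so callers must handle
+ * failure rather than assume ring space.
+ */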
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa2a6e..9d0af36a13e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
- sdvox = SDVO_BORDER_ENABLE;
+ sdvox = 0;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f768198931..93206e4eaa6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return type;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9ca06..21d6c29c2d2 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,7 +10,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d..e12c97fd8db 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
ccflags-y := -Iinclude/drm
nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+ nouveau_mm.o nouveau_vm.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv84_crypt.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o nv50_fbcon.o \
+ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o \
+ nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
- nv04_pm.o nv50_pm.o nva3_pm.o
+ nv04_pm.o nv50_pm.o nva3_pm.o \
+ nv50_vram.o nvc0_vram.o \
+ nv50_vm.o nvc0_vm.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 119152606e4..a54238058dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void)
static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
- if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ /* easy option one - an Intel vendor ID means integrated graphics */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
+
+ /* is this device on bus 0? - this heuristic may need improving */
+ if (pdev->bus->number == 0)
+ return VGA_SWITCHEROO_IGD;
+
+ return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f27..d3046559bf0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
return entry;
}
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+ int heads, int or)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
- entry->type = 0;
+ entry->type = type;
entry->i2c_index = i2c;
entry->heads = heads;
- entry->location = DCB_LOC_ON_CHIP;
- entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 2;
- entry->i2c_index = LEGACY_I2C_PANEL;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
- entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
- entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
- /*
- * For dvi-a either crtc probably works, but my card appears to only
- * support dvi-d. "nvidia" still attempts to program it for dvi-a,
- * doing the full fp output setup (program 0x6808.. fp dimension regs,
- * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
- * the monitor picks up the mode res ok and lights up, but no pixel
- * data appears, so the board manufacturer probably connected up the
- * sync lines, but missed the video traces / components
- *
- * with this introduction, dvi-a left as an exercise for the reader.
- */
- fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 1;
- entry->i2c_index = LEGACY_I2C_TV;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ if (type != OUTPUT_ANALOG)
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ entry->or = or;
}
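+/*
+ * For reference, the three helpers this replaces map onto the new one
+ * roughly as follows (a sketch; it assumes OUTPUT_ANALOG, OUTPUT_TV
+ * and OUTPUT_TMDS keep the old numeric types 0, 1 and 2):
+ *
+ *   fabricate_vga_output(dcb, i2c, heads)
+ *     -> fabricate_dcb_output(dcb, OUTPUT_ANALOG, i2c, heads, 1);
+ *   fabricate_tv_output(dcb, twoHeads)
+ *     -> fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ *                             twoHeads ? 3 : 1, 0);
+ *   fabricate_dvi_i_output(dcb, twoHeads)
+ *     -> fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ *                             twoHeads ? 3 : 1, 1);
+ */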
static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
return true;
}
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+ /* Apple iMac G4 NV17 */
+ if (of_machine_is_compatible("PowerMac4,5")) {
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+ return;
+ }
+#endif
+
+ /* Make up some sane defaults */
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ all_heads, 0);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ all_heads, 1);
+}
+
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
/* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_INFO(dev, "Assuming a CRT output exists\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
*/
NV_TRACEWARN(dev, "No useful information in BIOS output table; "
"adding all possible outputs\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- /*
- * Attempt to detect TV before DVI because the test
- * for the former is more accurate and it rules the
- * latter out.
- */
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
- else if (bios->tmds.output0_script_ptr ||
- bios->tmds.output1_script_ptr)
- fabricate_dvi_i_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+ ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef..a7fae26f465 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
- if (nvbo->tile)
- nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+ nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+ nouveau_vm_put(&nvbo->vma);
kfree(nvbo);
}
static void
-nouveau_bo_fixup_align(struct drm_device *dev,
- uint32_t tile_mode, uint32_t tile_flags,
- int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+ int *page_shift)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Align the size to the
- * appropriate boundaries. This does imply that sizes are rounded up
- * 3-7 pages, so be aware of this and do not waste memory by allocating
- * many small buffers.
- */
- if (dev_priv->card_type == NV_50) {
- uint32_t block_size = dev_priv->vram_size >> 15;
- int i;
-
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (is_power_of_2(block_size)) {
- for (i = 1; i < 10; i++) {
- *align = 12 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- } else {
- for (i = 1; i < 10; i++) {
- *align = 8 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- }
- *size = roundup(*size, *align);
- break;
- default:
- break;
- }
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- } else {
- if (tile_mode) {
+ if (dev_priv->card_type < NV_50) {
+ if (nvbo->tile_mode) {
if (dev_priv->chipset >= 0x40) {
*align = 65536;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x30) {
*align = 32768;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x20) {
*align = 16384;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x10) {
*align = 16384;
- *size = roundup(*size, 32 * tile_mode);
+ *size = roundup(*size, 32 * nvbo->tile_mode);
}
}
+ } else {
+ if (likely(dev_priv->chan_vm)) {
+ if (*size > 256 * 1024)
+ *page_shift = dev_priv->chan_vm->lpg_shift;
+ else
+ *page_shift = dev_priv->chan_vm->spg_shift;
+ } else {
+ *page_shift = 12;
+ }
+
+ *size = roundup(*size, (1 << *page_shift));
+ *align = max((1 << *page_shift), *align);
}
- /* ALIGN works only on powers of two. */
*size = roundup(*size, PAGE_SIZE);
-
- if (dev_priv->card_type == NV_50) {
- *size = roundup(*size, 65536);
- *align = max(65536, *align);
- }
}
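+/*
+ * Worked example for the >= NV_50 branch (a sketch; it assumes the
+ * usual small/large page shifts of 12 and 16, i.e. 4 KiB / 64 KiB):
+ *
+ *   *size = 300 KiB (307200) exceeds the 256 KiB threshold, so
+ *   *page_shift = lpg_shift = 16
+ *   *size       = roundup(307200, 65536) = 327680 (320 KiB)
+ *   *align      = max(65536, *align)
+ *
+ * A 64 KiB buffer stays on spg_shift and is only rounded to 4 KiB.
+ */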
int
@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret = 0;
+ int ret = 0, page_shift = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
- &align, &size);
+ nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
+ if (!nvbo->no_vm && dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+ NV_MEM_ACCESS_RW, &nvbo->vma);
+ if (ret) {
+ kfree(nvbo);
+ return ret;
+ }
+ }
+
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
*pnvbo = nvbo;
return 0;
}
@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
ttm_bo_kunmap(&nvbo->kmap);
}
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
+{
+ int ret;
+
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+ no_wait_reserve, no_wait_gpu);
+ if (ret)
+ return ret;
+
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
+ return 0;
+}
+
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50) {
+ man->func = &nouveau_vram_manager;
+ man->io_reserve_fastpath = false;
+ man->use_io_reserve_lru = true;
+ } else {
+ man->func = &ttm_bo_manager_func;
+ }
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- if (dev_priv->card_type == NV_50)
- man->gpu_offset = 0x40000000;
- else
- man->gpu_offset = 0;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
break;
case NOUVEAU_GART_SGDMA:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
+ man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
dev_priv->gart_info.type);
return -EINVAL;
}
- man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- if (nvbo->channel) {
- ret = nouveau_fence_sync(fence, nvbo->channel);
- if (ret)
- goto out;
- }
-
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
no_wait_reserve, no_wait_gpu, new_mem);
-out:
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return ret;
}
@@ -516,6 +515,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
}
static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 src_offset = old_mem->start << PAGE_SHIFT;
+ u64 dst_offset = new_mem->start << PAGE_SHIFT;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ if (!nvbo->no_vm) {
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ src_offset = nvbo->vma.offset;
+ else
+ src_offset += dev_priv->gart_info.aper_base;
+
+ if (new_mem->mem_type == TTM_PL_VRAM)
+ dst_offset = nvbo->vma.offset;
+ else
+ dst_offset += dev_priv->gart_info.aper_base;
+ }
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* line_length */
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ OUT_RING (chan, 0x00100110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
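+/*
+ * The copy above is submitted as PAGE_SIZE-wide "lines", at most 2047
+ * per submission (the method's line-count field appears to be capped
+ * there). Moving an 8 MiB bo (2048 pages), for example, takes two
+ * passes: 2047 lines, then 1, with src/dst offsets advancing by
+ * PAGE_SIZE * line_count between them.
+ */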
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -529,14 +580,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
dst_offset = new_mem->start << PAGE_SHIFT;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset += dev_priv->vm_vram_base;
+ src_offset = nvbo->vma.offset;
else
- src_offset += dev_priv->vm_gart_base;
+ src_offset += dev_priv->gart_info.aper_base;
if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset += dev_priv->vm_vram_base;
+ dst_offset = nvbo->vma.offset;
else
- dst_offset += dev_priv->vm_gart_base;
+ dst_offset += dev_priv->gart_info.aper_base;
}
ret = RING_SPACE(chan, 3);
@@ -683,17 +734,27 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm)
+ if (!chan || nvbo->no_vm) {
chan = dev_priv->channel;
+ mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+ }
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
+ if (dev_priv->card_type < NV_C0)
ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- if (ret)
- return ret;
+ else
+ ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+ no_wait_reserve,
+ no_wait_gpu, new_mem);
+ }
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ if (chan == dev_priv->channel)
+ mutex_unlock(&chan->mutex);
+ return ret;
}
static int
@@ -771,7 +832,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
- int ret;
if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */
@@ -781,18 +841,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->card_type == NV_50) {
- ret = nv50_mem_vm_bind_linear(dev,
- offset + dev_priv->vm_vram_base,
- new_mem->size,
- nouveau_bo_tile_layout(nvbo),
- offset);
- if (ret)
- return ret;
-
+ if (dev_priv->chan_vm) {
+ nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
- nvbo->tile_mode);
+ nvbo->tile_mode,
+ nvbo->tile_flags);
}
return 0;
@@ -808,9 +862,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
if (dev_priv->card_type >= NV_10 &&
dev_priv->card_type < NV_50) {
- if (*old_tile)
- nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
}
@@ -879,6 +931,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
+ int ret;
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -901,9 +954,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ {
+ struct nouveau_vram *vram = mem->mm_node;
+ u8 page_shift;
+
+ if (!dev_priv->bar1_vm) {
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(dev->pdev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ }
+
+ if (dev_priv->card_type == NV_C0)
+ page_shift = vram->page_shift;
+ else
+ page_shift = 12;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+ page_shift, NV_MEM_ACCESS_RW,
+ &vram->bar_vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(&vram->bar_vma, vram);
+
+ mem->bus.offset = vram->bar_vma.offset;
+ if (dev_priv->card_type == NV_50) /*XXX*/
+ mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
+ }
break;
default:
return -EINVAL;
@@ -914,6 +998,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct nouveau_vram *vram = mem->mm_node;
+
+ if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+ return;
+
+ if (!vram->bar_vma.node)
+ return;
+
+ nouveau_vm_unmap(&vram->bar_vma);
+ nouveau_vm_put(&vram->bar_vma);
}
static int
@@ -939,7 +1034,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
- return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+ return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
}
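+/*
+ * Classic swap-under-lock: reference the incoming fence first, swap
+ * the pointers under bdev->fence_lock, then drop the old reference
+ * outside the lock. Passing fence == NULL just clears the bo's sync
+ * object.
+ */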
struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1060,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = nouveau_fence_signalled,
- .sync_obj_wait = nouveau_fence_wait,
- .sync_obj_flush = nouveau_fence_flush,
- .sync_obj_unref = nouveau_fence_unref,
- .sync_obj_ref = nouveau_fence_ref,
+ .sync_obj_signaled = __nouveau_fence_signalled,
+ .sync_obj_wait = __nouveau_fence_wait,
+ .sync_obj_flush = __nouveau_fence_flush,
+ .sync_obj_unref = __nouveau_fence_unref,
+ .sync_obj_ref = __nouveau_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e3481..3960d66d7ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
int ret;
if (dev_priv->card_type >= NV_50) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
- dev_priv->vm_end, NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_AGP, &pushbuf);
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_dma_new(chan,
+ NV_CLASS_DMA_IN_MEMORY, 0,
+ (1ULL << 40),
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VM,
+ &pushbuf);
+ }
chan->pushbuf_base = pb->bo.offset;
} else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RO, &pushbuf,
- NULL);
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_GART, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_VIDMEM, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VRAM, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- pci_resource_start(dev->pdev,
- 1),
+ pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_PCI, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_PCI, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}
nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &pushbuf);
- return 0;
+ return ret;
}
static struct nouveau_bo *
@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
return NULL;
}
+ ret = nouveau_bo_map(pushbuf);
+ if (ret) {
+ nouveau_bo_unpin(pushbuf);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
return pushbuf;
}
@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
- uint32_t vram_handle, uint32_t tt_handle)
+ uint32_t vram_handle, uint32_t gart_handle)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
- int channel, user;
+ unsigned long flags;
int ret;
- /*
- * Alright, here is the full story
- * Nvidia cards have multiple hw fifo contexts (praise them for that,
- * no complicated crash-prone context switches)
- * We allocate a new context for each app and let it write to it
- * directly (woo, full userspace command submission !)
- * When there are no more contexts, you lost
- */
- for (channel = 0; channel < pfifo->channels; channel++) {
- if (dev_priv->fifos[channel] == NULL)
+ /* allocate and lock channel structure */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+ chan->file_priv = file_priv;
+ chan->vram_handle = vram_handle;
+ chan->gart_handle = gart_handle;
+
+ kref_init(&chan->ref);
+ atomic_set(&chan->users, 1);
+ mutex_init(&chan->mutex);
+ mutex_lock(&chan->mutex);
+
+ /* allocate hw channel id */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+ if (!dev_priv->channels.ptr[chan->id]) {
+ nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
+ }
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* no more fifos. you lost. */
- if (channel == pfifo->channels)
- return -EINVAL;
+ if (chan->id == pfifo->channels) {
+ mutex_unlock(&chan->mutex);
+ kfree(chan);
+ return -ENODEV;
+ }
- dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
- GFP_KERNEL);
- if (!dev_priv->fifos[channel])
- return -ENOMEM;
- chan = dev_priv->fifos[channel];
+ NV_DEBUG(dev, "initialising channel %d\n", chan->id);
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
- chan->dev = dev;
- chan->id = channel;
- chan->file_priv = file_priv;
- chan->vram_handle = vram_handle;
- chan->gart_handle = tt_handle;
-
- NV_INFO(dev, "Allocating FIFO number %d\n", channel);
/* Allocate DMA push buffer */
chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
if (!chan->pushbuf_bo) {
ret = -ENOMEM;
NV_ERROR(dev, "pushbuf %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_dma_pre_init(chan);
-
- /* Locate channel's user control regs */
- if (dev_priv->card_type < NV_40)
- user = NV03_USER(channel);
- else
- if (dev_priv->card_type < NV_50)
- user = NV40_USER(channel);
- else
- user = NV50_USER(channel);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
- PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "ioremap of regs failed.\n");
- nouveau_channel_free(chan);
- return -ENOMEM;
- }
chan->user_put = 0x40;
chan->user_get = 0x44;
@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_notifier_init_channel(chan);
if (ret) {
NV_ERROR(dev, "ntfy %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* Setup channel's default objects */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
if (ret) {
NV_ERROR(dev, "gpuobj %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_channel_pushbuf_ctxdma_init(chan);
if (ret) {
NV_ERROR(dev, "pbctxdma %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* disable the fifo caches */
pfifo->reassign(dev, false);
- /* Create a graphics context for new channel */
- ret = pgraph->create_context(chan);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
-
/* Construct initial RAMFC for new channel */
ret = pfifo->create_context(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
if (!ret)
ret = nouveau_fence_channel_init(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_debugfs_channel_init(chan);
- NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+ NV_DEBUG(dev, "channel %d initialised\n", chan->id);
*chan_ret = chan;
return 0;
}
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
+{
+ struct nouveau_channel *chan = NULL;
+
+ if (likely(ref && atomic_inc_not_zero(&ref->users)))
+ nouveau_channel_ref(ref, &chan);
+
+ return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+
+ if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+ if (unlikely(!chan))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(file_priv && chan->file_priv != file_priv)) {
+ nouveau_channel_put_unlocked(&chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&chan->mutex);
+ return chan;
+}
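+/*
+ * Callers pair this with nouveau_channel_put(); a sketch mirroring
+ * the ioctl paths further down:
+ *
+ *   chan = nouveau_channel_get(dev, file_priv, req->channel);
+ *   if (IS_ERR(chan))
+ *           return PTR_ERR(chan);
+ *   ... chan->mutex is held here ...
+ *   nouveau_channel_put(&chan);
+ */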
+
void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
+ struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
unsigned long flags;
- int ret;
- NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+ /* decrement the refcount, and we're done if there's still refs */
+ if (likely(!atomic_dec_and_test(&chan->users))) {
+ nouveau_channel_ref(NULL, pchan);
+ return;
+ }
+ /* no one wants the channel anymore */
+ NV_DEBUG(dev, "freeing channel %d\n", chan->id);
nouveau_debugfs_channel_fini(chan);
- /* Give outstanding push buffers a chance to complete */
- nouveau_fence_update(chan);
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- struct nouveau_fence *fence = NULL;
+ /* give it chance to idle */
+ nouveau_channel_idle(chan);
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
- }
-
- /* Ensure all outstanding fences are signaled. They should be if the
+ /* ensure all outstanding fences are signaled. they should be if the
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
nouveau_fence_channel_fini(chan);
- /* This will prevent pfifo from switching channels. */
+ /* boot it off the hardware */
pfifo->reassign(dev, false);
- /* We want to give pgraph a chance to idle and get rid of all potential
- * errors. We need to do this before the lock, otherwise the irq handler
- * is unable to process them.
+ /* We want to give pgraph a chance to idle and get rid of all
+ * potential errors. We need to do this without the context
+ * switch lock held, otherwise the irq handler is unable to
+ * process them.
*/
if (pgraph->channel(dev) == chan)
nouveau_wait_for_idle(dev);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- pgraph->fifo_access(dev, false);
- if (pgraph->channel(dev) == chan)
- pgraph->unload_context(dev);
- pgraph->destroy_context(chan);
- pgraph->fifo_access(dev, true);
-
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
+ /* destroy the engine specific contexts */
pfifo->destroy_context(chan);
+ pgraph->destroy_context(chan);
+ if (pcrypt->destroy_context)
+ pcrypt->destroy_context(chan);
pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ /* aside from its resources, the channel should now be dead,
+ * remove it from the channel list
+ */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* Release the channel's resources */
+ /* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
}
nouveau_gpuobj_channel_takedown(chan);
nouveau_notifier_takedown_channel(chan);
- if (chan->user)
- iounmap(chan->user);
- dev_priv->fifos[chan->id] = NULL;
+ nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+ mutex_unlock(&(*pchan)->mutex);
+ nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+ struct nouveau_channel *chan =
+ container_of(ref, struct nouveau_channel, ref);
+
kfree(chan);
}
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan)
+{
+ if (chan)
+ kref_get(&chan->ref);
+
+ if (*pchan)
+ kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+ *pchan = chan;
+}
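+/*
+ * nouveau_channel_ref() behaves like a kref-managed pointer
+ * assignment: grab a reference on the new channel (if any), drop the
+ * one held through *pchan (if any), then store the new pointer. A
+ * minimal sketch:
+ *
+ *   struct nouveau_channel *mine = NULL;
+ *
+ *   nouveau_channel_ref(chan, &mine);   (mine = chan, ref taken)
+ *   ...
+ *   nouveau_channel_ref(NULL, &mine);   (ref dropped, mine = NULL)
+ */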
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ nouveau_fence_update(chan);
+
+ if (chan->fence.sequence != chan->fence.sequence_ack) {
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
+ }
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ }
+}
+
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_channel *chan;
int i;
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ chan = nouveau_channel_get(dev, file_priv, i);
+ if (IS_ERR(chan))
+ continue;
- if (chan && chan->file_priv == file_priv)
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
}
}
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
- int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
-
- if (channel >= engine->fifo.channels)
- return 0;
- if (dev_priv->fifos[channel] == NULL)
- return 0;
-
- return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
/***********************************
* ioctls wrapping the functions
@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- init->subchan[0].handle = NvM2MF;
- if (dev_priv->card_type < NV_50)
- init->subchan[0].grclass = 0x0039;
- else
- init->subchan[0].grclass = 0x5039;
- init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
- init->nr_subchan = 2;
+ if (dev_priv->card_type < NV_C0) {
+ init->subchan[0].handle = NvM2MF;
+ if (dev_priv->card_type < NV_50)
+ init->subchan[0].grclass = 0x0039;
+ else
+ init->subchan[0].grclass = 0x5039;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
+ } else {
+ init->subchan[0].handle = 0x9039;
+ init->subchan[0].grclass = 0x9039;
+ init->nr_subchan = 1;
+ }
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
&init->notifier_handle);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
- return 0;
+ if (ret == 0)
+ atomic_inc(&chan->users); /* userspace reference */
+ nouveau_channel_put(&chan);
+ return ret;
}
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_channel_free *cfree = data;
+ struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
return 0;
}
@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e9a3d..a21e0007683 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"
+static void nouveau_connector_hotplug(void *, int);
+
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
}
static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
{
- struct nouveau_connector *nv_connector =
- nouveau_connector(drm_connector);
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
return;
dev = nv_connector->base.dev;
+ dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
+ pgpio = &dev_priv->engine.gpio;
+ if (pgpio->irq_unregister) {
+ pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
kfree(nv_connector->edid);
- drm_sysfs_connector_remove(drm_connector);
- drm_connector_cleanup(drm_connector);
- kfree(drm_connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
}
static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_connector *nv_connector = NULL;
struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
+ if (pgpio->irq_register) {
+ pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;
@@ -886,3 +902,29 @@ fail:
return ERR_PTR(ret);
}
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+ struct drm_connector *connector = data;
+ struct drm_device *dev = connector->dev;
+
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (connector->encoder && connector->encoder->crtc &&
+ connector->encoder->crtc->enabled) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+ struct drm_encoder_helper_funcs *helper =
+ connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+ }
+
+ drm_helper_hpd_irq_event(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd65b4d..505c6bfb4d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+ NV_PCRTC_INTR_0_VBLANK);
+
+ return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo)
+{
+ int ret;
+
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_unreserve;
+
+ return 0;
+
+fail_unreserve:
+ ttm_bo_unreserve(&new_bo->bo);
+fail:
+ nouveau_bo_unpin(new_bo);
+ return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_fence *fence)
+{
+ nouveau_bo_fence(new_bo, fence);
+ ttm_bo_unreserve(&new_bo->bo);
+
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+
+ nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+ struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_page_flip_state *s,
+ struct nouveau_fence **pfence)
+{
+ struct drm_device *dev = chan->dev;
+ unsigned long flags;
+ int ret;
+
+ /* Queue it to the pending list */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&s->head, &chan->nvsw.flip);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* Synchronize with the old framebuffer */
+ ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+ if (ret)
+ goto fail;
+
+ /* Emit the pageflip */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ goto fail;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING(chan, 0);
+ FIRE_RING(chan);
+
+ ret = nouveau_fence_new(chan, pfence, true);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_del(&s->head);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+ struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct nouveau_page_flip_state *s;
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
+ int ret;
+
+ if (dev_priv->engine.graph.accel_blocked)
+ return -ENODEV;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ /* Don't let the buffers go away while we flip */
+ ret = nouveau_page_flip_reserve(old_bo, new_bo);
+ if (ret)
+ goto fail_free;
+
+ /* Initialize a page flip struct */
+ *s = (struct nouveau_page_flip_state)
+ { { }, event, nouveau_crtc(crtc)->index,
+ fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ new_bo->bo.offset };
+
+ /* Choose the channel the flip will be handled in */
+ chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ if (!chan)
+ chan = nouveau_channel_get_unlocked(dev_priv->channel);
+ mutex_lock(&chan->mutex);
+
+ /* Emit a page flip */
+ ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+ nouveau_channel_put(&chan);
+ if (ret)
+ goto fail_unreserve;
+
+ /* Update the crtc struct and cleanup */
+ crtc->fb = fb;
+
+ nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+ nouveau_fence_unref(&fence);
+ return 0;
+
+fail_unreserve:
+ nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+ kfree(s);
+ return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+ struct nouveau_page_flip_state *ps)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state *s;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (list_empty(&chan->nvsw.flip)) {
+ NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return -EINVAL;
+ }
+
+ s = list_first_entry(&chan->nvsw.flip,
+ struct nouveau_page_flip_state, head);
+ if (s->event) {
+ struct drm_pending_vblank_event *e = s->event;
+ struct timeval now;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ list_del(&s->head);
+ *ps = *s;
+ kfree(s);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return 0;
+}
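+/*
+ * Putting the pieces together, the flip life cycle added above is:
+ *
+ *   nouveau_crtc_page_flip()
+ *     -> pin and reserve both buffer objects
+ *     -> nouveau_page_flip_emit(): queue the state on
+ *        chan->nvsw.flip, sync with the old fb's fence, then emit
+ *        NV_SW_PAGE_FLIP plus a new fence
+ *     -> fence both bos, unreserve them, unpin the old one
+ *
+ * Later the software-method handler calls nouveau_finish_page_flip(),
+ * which pops the queued state, delivers the vblank event to
+ * userspace and hands the state back to the caller.
+ */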
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e600dc..65699bfaaae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_bo *pushbuf = chan->pushbuf_bo;
- if (dev_priv->card_type == NV_50) {
+ if (dev_priv->card_type >= NV_50) {
const int ib_size = pushbuf->bo.mem.size / 2;
chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,17 +59,26 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
int ret, i;
- /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
- ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
- 0x0039 : 0x5039, &obj);
- if (ret)
- return ret;
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+ OUT_RING (chan, 0x00009039);
+ FIRE_RING (chan);
+ return 0;
+ }
- ret = nouveau_ramht_insert(chan, NvM2MF, obj);
- nouveau_gpuobj_ref(NULL, &obj);
+ /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+ 0x0039 : 0x5039);
if (ret)
return ret;
@@ -78,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
- /* Map push buffer */
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret)
- return ret;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21d3c8..c36f1763fea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@ enum {
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
- NvEvoFB32 = 0x01000002
+ NvEvoFB32 = 0x01000002,
+ NvEvoVRAM_LP = 0x01000003
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -125,6 +126,12 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+ OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
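+/*
+ * Worked encoding example (a sketch; it assumes NvSubM2MF is
+ * subchannel 0 and that op 2 is the "increasing methods" opcode):
+ *
+ *   BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2)
+ *     = (2 << 28) | (2 << 16) | (0 << 13) | (0x0238 >> 2)
+ *     = 0x20000000 | 0x00020000 | 0x0000008e
+ *     = 0x2002008e
+ *
+ * Note the Fermi header carries a method index (mthd >> 2), where
+ * BEGIN_RING() below encodes the byte offset directly.
+ */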
+
+static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f309ae3..38d599554bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
struct bit_displayport_encoder_table *dpe;
int dpe_headerlen;
uint8_t config[4], status[3];
- bool cr_done, cr_max_vs, eq_done;
+ bool cr_done, cr_max_vs, eq_done, hpd_state;
int ret = 0, i, tries, voltage;
NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
/* disable hotplug detect, this flips around on some panels during
* link training.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
if (dpe->script0) {
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ stop:
}
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
return eq_done;
}
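The DP link-training path now saves the value irq_enable() returns and restores that afterwards, rather than unconditionally re-enabling hotplug IRQs; a panel whose hotplug interrupt was already off before training stays off. A standalone sketch of the save/restore shape (names illustrative):

    #include <assert.h>
    #include <stdbool.h>

    struct hpd_line { bool irq_enabled; };

    /* Returns the previous state so the caller can restore it exactly. */
    static bool hpd_irq_enable(struct hpd_line *line, bool on)
    {
            bool prev = line->irq_enabled;
            line->irq_enabled = on;
            return prev;
    }

    int main(void)
    {
            struct hpd_line line = { .irq_enabled = false };
            bool saved = hpd_irq_enable(&line, false); /* quiet during training */
            /* ... link training would run here ... */
            hpd_irq_enable(&line, saved);              /* restore, not force-on */
            assert(!line.irq_enabled);                 /* was off, stays off */
            return 0;
    }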
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65..13bb672a16f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
NV_INFO(dev, "Disabling fbcon acceleration...\n");
nouveau_fbcon_save_disable_accel(dev);
@@ -193,23 +200,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
NV_INFO(dev, "Idling channels...\n");
for (i = 0; i < pfifo->channels; i++) {
- struct nouveau_fence *fence = NULL;
-
- chan = dev_priv->fifos[i];
- if (!chan || (dev_priv->card_type >= NV_50 &&
- chan == dev_priv->fifos[0]))
- continue;
-
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
+ chan = dev_priv->channels.ptr[i];
- if (ret) {
- NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
- chan->id);
- }
+ if (chan && chan->pushbuf_bo)
+ nouveau_channel_idle(chan);
}
pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
pgraph->unload_context(dev);
- NV_INFO(dev, "Suspending GPU objects...\n");
- ret = nouveau_gpuobj_suspend(dev);
+ ret = pinstmem->suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
goto out_abort;
}
- ret = pinstmem->suspend(dev);
+ NV_INFO(dev, "Suspending GPU objects...\n");
+ ret = nouveau_gpuobj_suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
- nouveau_gpuobj_suspend_cleanup(dev);
+ pinstmem->resume(dev);
goto out_abort;
}
@@ -263,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ NV_INFO(dev, "Restoring GPU objects...\n");
+ nouveau_gpuobj_resume(dev);
+
NV_INFO(dev, "Reinitialising engines...\n");
engine->instmem.resume(dev);
engine->mc.init(dev);
engine->timer.init(dev);
engine->fb.init(dev);
engine->graph.init(dev);
+ engine->crypt.init(dev);
engine->fifo.init(dev);
- NV_INFO(dev, "Restoring GPU objects...\n");
- nouveau_gpuobj_resume(dev);
-
nouveau_irq_postinstall(dev);
/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
int j;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- chan = dev_priv->fifos[i];
+ chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -347,13 +345,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
- nv_crtc->cursor.set_offset(nv_crtc,
- nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
-
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
+ nv_crtc->cursor_saved_y);
}
/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +389,9 @@ static struct drm_driver driver = {
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = nouveau_vblank_enable,
+ .disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = {
@@ -403,6 +402,7 @@ static struct drm_driver driver = {
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
#endif
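The new msi parameter (set with nouveau.msi=1 on the kernel command line, or modprobe nouveau msi=1; perm 0400 means it cannot be flipped at runtime) gates MSI setup. A hedged kernel-style sketch of how such a parameter typically drives pci_enable_msi(); the helper name is hypothetical, and a failed enable simply leaves the device on legacy INTx:

    #include <linux/pci.h>

    /* Hypothetical helper, not the driver's actual code: returns whether
     * MSI ended up enabled so teardown knows to call pci_disable_msi(). */
    static bool hypothetical_msi_setup(struct pci_dev *pdev, int want_msi)
    {
            if (want_msi && pci_enable_msi(pdev) == 0)  /* 0 == success */
                    return true;
            return false;                               /* stay on INTx */
    }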
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64c03b..46e32573b3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
+#include "nouveau_util.h"
+
struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+ struct drm_device *dev;
+
+ struct nouveau_vma bar_vma;
+ u8 page_shift;
+
+ struct list_head regions;
+ u32 memtype;
+ u64 offset;
+ u64 size;
+};
struct nouveau_tile_reg {
- struct nouveau_fence *fence;
- uint32_t addr;
- uint32_t size;
bool used;
+ uint32_t addr;
+ uint32_t limit;
+ uint32_t pitch;
+ uint32_t zcomp;
+ struct drm_mm_node *tag_mem;
+ struct nouveau_fence *fence;
};
struct nouveau_bo {
@@ -88,6 +103,7 @@ struct nouveau_bo {
struct nouveau_channel *channel;
+ struct nouveau_vma vma;
bool mappable;
bool no_vm;
@@ -96,7 +112,6 @@ struct nouveau_bo {
struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
- struct drm_file *cpu_filp;
int pin_refcnt;
};
@@ -133,20 +148,28 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
-#define NVOBJ_ENGINE_DISPLAY 2
+#define NVOBJ_ENGINE_PPP 2
+#define NVOBJ_ENGINE_COPY 3
+#define NVOBJ_ENGINE_VP 4
+#define NVOBJ_ENGINE_CRYPT 5
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
#define NVOBJ_ENGINE_INT 0xdeadbeef
+#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_VM (1 << 3)
+
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
struct nouveau_gpuobj {
struct drm_device *dev;
struct kref refcount;
struct list_head list;
- struct drm_mm_node *im_pramin;
- struct nouveau_bo *im_backing;
- uint32_t *im_backing_suspend;
- int im_bound;
+ void *node;
+ u32 *suspend;
uint32_t flags;
@@ -162,10 +185,29 @@ struct nouveau_gpuobj {
void *priv;
};
+struct nouveau_page_flip_state {
+ struct list_head head;
+ struct drm_pending_vblank_event *event;
+ int crtc, bpp, pitch, x, y;
+ uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+ NOUVEAU_UCHANNEL_MUTEX,
+ NOUVEAU_KCHANNEL_MUTEX
+};
+
struct nouveau_channel {
struct drm_device *dev;
int id;
+ /* references to the channel data structure */
+ struct kref ref;
+ /* users of the hardware channel resources, the hardware
+ * context will be kicked off when it reaches zero. */
+ atomic_t users;
+ struct mutex mutex;
+
/* owner of this fifo */
struct drm_file *file_priv;
/* mapping of the fifo itself */
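The channel now carries two counters with distinct meanings: ref (a kref) pins the data structure itself, while users counts holders of the hardware context, which is torn down when it hits zero even though fences may still pin the struct. A standalone sketch of the idea using plain integers in place of kref/atomic_t:

    #include <assert.h>
    #include <stdlib.h>

    struct chan {
            int ref;        /* structure lifetime (kref in the kernel)  */
            int users;      /* hardware-context users (atomic_t there)  */
            int hw_alive;
    };

    static void chan_users_put(struct chan *c)
    {
            if (--c->users == 0)
                    c->hw_alive = 0;        /* kick the hw context */
    }

    static void chan_ref_put(struct chan **pc)
    {
            struct chan *c = *pc;
            *pc = NULL;
            if (--c->ref == 0)
                    free(c);                /* memory outlives the hw */
    }

    int main(void)
    {
            struct chan *c = calloc(1, sizeof(*c));
            struct chan *fence = c;         /* a fence also pins the struct */
            c->ref = 2; c->users = 1; c->hw_alive = 1;

            chan_users_put(c);              /* owner releases the channel... */
            chan_ref_put(&c);
            assert(fence->hw_alive == 0 && fence->ref == 1);

            chan_ref_put(&fence);           /* ...struct freed with last ref */
            return 0;
    }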
@@ -198,16 +240,17 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;
+ void *fifo_priv;
/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
struct nouveau_gpuobj *ramin_grctx;
+ struct nouveau_gpuobj *crypt_ctx;
void *pgraph_ctx;
/* NV50 VM */
+ struct nouveau_vm *vm;
struct nouveau_gpuobj *vm_pd;
- struct nouveau_gpuobj *vm_gart_pt;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +281,11 @@ struct nouveau_channel {
struct {
struct nouveau_gpuobj *vblsem;
+ uint32_t vblsem_head;
uint32_t vblsem_offset;
uint32_t vblsem_rval;
struct list_head vbl_wait;
+ struct list_head flip;
} nvsw;
struct {
@@ -258,11 +303,11 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
- void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
- int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
- int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+ void (*put)(struct nouveau_gpuobj *);
+ int (*map)(struct nouveau_gpuobj *);
+ void (*unmap)(struct nouveau_gpuobj *);
+
void (*flush)(struct drm_device *);
};
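The instmem callbacks change from populate/clear/bind/unbind to a plainer get/put/map/unmap lifecycle: get reserves instance memory for the object, map makes it CPU-visible, and the two are undone in reverse order. A hedged caller sketch (size and alignment values are illustrative):

    /* Hedged caller sketch, not driver code: allocate then map, and
     * unwind in reverse on failure. */
    static int hypothetical_instobj_setup(struct nouveau_instmem_engine *imem,
                                          struct nouveau_gpuobj *obj)
    {
            int ret = imem->get(obj, 4096, 256);    /* size, align: illustrative */
            if (ret)
                    return ret;

            ret = imem->map(obj);                   /* CPU visibility is separate */
            if (ret) {
                    imem->put(obj);
                    return ret;
            }
            /* ... later: imem->unmap(obj); imem->put(obj); */
            return 0;
    }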
@@ -279,15 +324,21 @@ struct nouveau_timer_engine {
struct nouveau_fb_engine {
int num_tiles;
+ struct drm_mm tag_heap;
+ void *priv;
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*init_tile_region)(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+ void (*set_tile_region)(struct drm_device *dev, int i);
+ void (*free_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_fifo_engine {
+ void *priv;
int channels;
struct nouveau_gpuobj *playlist[2];
@@ -310,22 +361,11 @@ struct nouveau_fifo_engine {
void (*tlb_flush)(struct drm_device *dev);
};
-struct nouveau_pgraph_object_method {
- int id;
- int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
- uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
- int id;
- bool software;
- struct nouveau_pgraph_object_method *methods;
-};
-
struct nouveau_pgraph_engine {
- struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
+ bool registered;
int grctx_size;
+ void *priv;
/* NV2x/NV3x context table (0x400780) */
struct nouveau_gpuobj *ctx_table;
@@ -342,8 +382,7 @@ struct nouveau_pgraph_engine {
int (*unload_context)(struct drm_device *);
void (*tlb_flush)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*set_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_display_engine {
@@ -355,13 +394,19 @@ struct nouveau_display_engine {
};
struct nouveau_gpio_engine {
+ void *priv;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
int (*get)(struct drm_device *, enum dcb_gpio_tag);
int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
- void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
struct nouveau_pm_voltage_level {
@@ -437,6 +482,7 @@ struct nouveau_pm_engine {
struct nouveau_pm_level *cur;
struct device *hwmon;
+ struct notifier_block acpi_nb;
int (*clock_get)(struct drm_device *, u32 id);
void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +495,25 @@ struct nouveau_pm_engine {
int (*temp_get)(struct drm_device *);
};
+struct nouveau_crypt_engine {
+ bool registered;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+ int (*init)(struct drm_device *);
+ int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **);
+ void (*put)(struct drm_device *, struct nouveau_vram **);
+
+ bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -459,6 +524,8 @@ struct nouveau_engine {
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
+ struct nouveau_crypt_engine crypt;
+ struct nouveau_vram_engine vram;
};
struct nouveau_pll_vals {
@@ -577,18 +644,15 @@ struct drm_nouveau_private {
bool ramin_available;
struct drm_mm ramin_heap;
struct list_head gpuobj_list;
+ struct list_head classes;
struct nouveau_bo *vga_ram;
+ /* interrupt handling */
+ void (*irq_handler[32])(struct drm_device *);
+ bool msi_enabled;
struct workqueue_struct *wq;
struct work_struct irq_work;
- struct work_struct hpd_work;
-
- struct {
- spinlock_t lock;
- uint32_t hpd0_bits;
- uint32_t hpd1_bits;
- } hpd_state;
struct list_head vbl_waiting;
@@ -605,8 +669,10 @@ struct drm_nouveau_private {
struct nouveau_bo *bo;
} fence;
- int fifo_alloc_count;
- struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+ struct {
+ spinlock_t lock;
+ struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+ } channels;
struct nouveau_engine engine;
struct nouveau_channel *channel;
@@ -632,12 +698,14 @@ struct drm_nouveau_private {
uint64_t aper_free;
struct nouveau_gpuobj *sg_ctxdma;
- struct page *sg_dummy_page;
- dma_addr_t sg_dummy_bus;
+ struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
- struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
/* VRAM/fb configuration */
uint64_t vram_size;
@@ -650,14 +718,12 @@ struct drm_nouveau_private {
uint64_t fb_aper_free;
int fb_mtrr;
+ /* BAR control (NV50-) */
+ struct nouveau_vm *bar1_vm;
+ struct nouveau_vm *bar3_vm;
+
/* G8x/G9x virtual address space */
- uint64_t vm_gart_base;
- uint64_t vm_gart_size;
- uint64_t vm_vram_base;
- uint64_t vm_vram_size;
- uint64_t vm_end;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
- int vm_vram_pt_nr;
+ struct nouveau_vm *chan_vm;
struct nvbios vbios;
@@ -674,6 +740,7 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
+ u32 evo_alloc;
struct {
struct dcb_entry *dcb;
u16 script;
@@ -686,6 +753,8 @@ struct drm_nouveau_private {
struct nouveau_fbdev *nfbdev;
struct apertures_struct *apertures;
+
+ bool powered_down;
};
static inline struct drm_nouveau_private *
@@ -719,16 +788,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
- struct drm_nouveau_private *nv = dev->dev_private; \
- if (!nouveau_channel_owner(dev, (cl), (id))) { \
- NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
- DRM_CURRENTPID, (id)); \
- return -EPERM; \
- } \
- (ch) = nv->fifos[(id)]; \
-} while (0)
-
/* nouveau_drv.c */
extern int nouveau_agpmode;
extern int nouveau_duallink;
@@ -748,6 +807,7 @@ extern int nouveau_force_post;
extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +822,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
@@ -775,18 +837,18 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
- uint32_t addr,
- uint32_t size,
- uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
- struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence);
-extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
- uint32_t size, uint32_t flags,
- uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
- uint32_t size);
+extern int nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+ struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+
+/* nvc0_vram.c */
+extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +865,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
- int channel);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do { \
+ int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
+ if (ret) \
+ return ret; \
+} while(0)
+
+#define NVOBJ_MTHD(d,c,m,e) do { \
+ int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
+ if (ret) \
+ return ret; \
+} while(0)
+
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *,
+ u32 class, u32 mthd, u32 data));
+extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
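With the per-engine grclass tables gone, engines register their classes and software methods once into dev_priv->classes through the NVOBJ_CLASS()/NVOBJ_MTHD() helpers, which return from the enclosing function on failure. A hedged usage sketch; the function name is hypothetical, but the class, method, and handler all come from this header:

    /* Hedged usage sketch: an engine's one-time registration path. */
    static int hypothetical_sw_register(struct drm_device *dev)
    {
            NVOBJ_CLASS(dev, NV_SW, SW);            /* returns on error */
            NVOBJ_MTHD (dev, NV_SW, NV_SW_PAGE_FLIP,
                        nv04_graph_mthd_page_flip);
            return 0;
    }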
@@ -832,21 +917,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
- uint64_t offset, uint64_t size,
- int access, struct nouveau_gpuobj **,
- uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+ u64 size, int target, int access, u32 type,
+ u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+ int class, u64 base, u64 size, int target,
+ int access, u32 type, u32 comp);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_irq.c */
+extern int nouveau_irq_init(struct drm_device *);
+extern void nouveau_irq_fini(struct drm_device *);
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_register(struct drm_device *, int status_bit,
+ void (*)(struct drm_device *));
+extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +943,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
- uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+ uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
/* nouveau_debugfs.c */
@@ -966,18 +1055,25 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1085,7 @@ extern void nvc0_fb_takedown(struct drm_device *);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1095,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
extern int nv04_fifo_load_context(struct nouveau_channel *);
extern int nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
/* nv10_fifo.c */
extern int nv10_fifo_init(struct drm_device *);
extern int nv10_fifo_channel_id(struct drm_device *);
extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
extern int nv10_fifo_load_context(struct nouveau_channel *);
extern int nv10_fifo_unload_context(struct drm_device *);
/* nv40_fifo.c */
extern int nv40_fifo_init(struct drm_device *);
extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
extern int nv40_fifo_load_context(struct nouveau_channel *);
extern int nv40_fifo_unload_context(struct drm_device *);
@@ -1038,7 +1134,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
extern void nv04_graph_takedown(struct drm_device *);
extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1142,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
/* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
extern int nv10_graph_init(struct drm_device *);
extern void nv10_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1154,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
/* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
extern int nv20_graph_create_context(struct nouveau_channel *);
extern void nv20_graph_destroy_context(struct nouveau_channel *);
extern int nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1166,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
/* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
extern int nv40_graph_init(struct drm_device *);
extern void nv40_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1177,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
/* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
extern int nv50_graph_init(struct drm_device *);
extern void nv50_graph_takedown(struct drm_device *);
extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1188,10 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1203,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);
+/* nv84_crypt.c */
+extern int nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
extern void nv04_instmem_flush(struct drm_device *);
/* nv50_instmem.c */
@@ -1130,26 +1226,18 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
extern void nv50_instmem_flush(struct drm_device *);
extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
/* nvc0_instmem.c */
extern int nvc0_instmem_init(struct drm_device *);
extern void nvc0_instmem_takedown(struct drm_device *);
extern int nvc0_instmem_suspend(struct drm_device *);
extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);
/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);
@@ -1219,6 +1307,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1234,12 +1325,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+ return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+ __nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_ref(obj);
+}
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
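The raw void-pointer fence entry points gain a __ prefix and remain only for TTM's sync_obj hooks; everything else goes through the typed inline wrappers, so ordinary callers lose the casts. A hedged sketch of a typical caller:

    /* Hedged sketch: emit a fence and wait for it with the typed API. */
    static int hypothetical_wait_for_channel(struct nouveau_channel *chan)
    {
            struct nouveau_fence *fence = NULL;
            int ret;

            ret = nouveau_fence_new(chan, &fence, true);    /* emit right away */
            if (ret)
                    return ret;

            ret = nouveau_fence_wait(fence, true, false);   /* lazy, uninterruptible */
            nouveau_fence_unref(&fence);                    /* no (void **) cast */
            return ret;
    }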
@@ -1259,15 +1373,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+ struct nouveau_page_flip_state *);
+
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
/* nv50_calc. */
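Much of the header churn above serves one discipline: ioctls no longer index dev_priv->fifos directly (or use the removed *_WITH_RETURN macro) but take a reference via nouveau_channel_get(), which also acquires the per-channel mutex, and must drop it with nouveau_channel_put() on every exit path. A hedged sketch of the shape:

    /* Hedged sketch of the new channel lookup discipline in an ioctl. */
    static int hypothetical_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv, int id)
    {
            struct nouveau_channel *chan;
            int ret;

            chan = nouveau_channel_get(dev, file_priv, id);
            if (IS_ERR(chan))
                    return PTR_ERR(chan);   /* replaces the old macro */

            ret = 0;        /* ... use the channel here ... */

            nouveau_channel_put(&chan);     /* required on every path */
            return ret;
    }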
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1461,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
}
#define nv_wait(dev, reg, mask, val) \
- nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+ nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+ nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1576,23 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM 8
+
+#define NV_MEM_TARGET_VRAM 0
+#define NV_MEM_TARGET_PCI 1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM 3
+#define NV_MEM_TARGET_GART 4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
@@ -1457,5 +1603,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV_SW_VBLSEM_RELEASE 0x00000408
+#define NV_SW_PAGE_FLIP 0x00000500
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd484..a26d04740c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_fillrect(info, rect);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_fillrect(info, rect);
+ else
+ ret = nvc0_fbcon_fillrect(info, rect);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_copyarea(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_copyarea(info, image);
+ else
+ ret = nvc0_fbcon_copyarea(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_imageblit(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_imageblit(info, image);
+ else
+ ret = nvc0_fbcon_imageblit(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_imageblit(info, image);
+}
+
static int
nouveau_fbcon_sync(struct fb_info *info)
{
@@ -58,22 +154,36 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
- if (!chan || !chan->accel_done ||
+ if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
- if (RING_SPACE(chan, 4)) {
+ if (!mutex_trylock(&chan->mutex))
+ return 0;
+
+ ret = RING_SPACE(chan, 4);
+ if (ret) {
+ mutex_unlock(&chan->mutex);
nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING(chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING(chan, 0);
+ if (dev_priv->card_type >= NV_C0) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+ OUT_RING (chan, 0);
+ } else {
+ BEGIN_RING(chan, 0, 0x0104, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, 0, 0x0100, 1);
+ OUT_RING (chan, 0);
+ }
+
nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
FIRE_RING(chan);
+ mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
@@ -97,9 +207,9 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+ .fb_copyarea = nouveau_fbcon_copyarea,
+ .fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
@@ -108,29 +218,13 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static struct fb_ops nv04_fbcon_ops = {
+static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv04_fbcon_fillrect,
- .fb_copyarea = nv04_fbcon_copyarea,
- .fb_imageblit = nv04_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv50_fbcon_fillrect,
- .fb_copyarea = nv50_fbcon_copyarea,
- .fb_imageblit = nv50_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,21 +351,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
- info->fbops = &nouveau_fbcon_ops;
- info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
- dev_priv->vm_vram_base;
+ info->fbops = &nouveau_fbcon_sw_ops;
+ info->fix.smem_start = dev->mode_config.fb_base +
+ (nvbo->bo.mem.start << PAGE_SHIFT);
info->fix.smem_len = size;
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(pdev, 1);
- info->fix.mmio_len = pci_resource_len(pdev, 1);
-
/* Set aperture base/size for vesafb takeover */
info->apertures = dev_priv->apertures;
if (!info->apertures) {
@@ -285,19 +374,20 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+ mutex_unlock(&dev->struct_mutex);
+
if (dev_priv->channel && !nouveau_nofbaccel) {
- switch (dev_priv->card_type) {
- case NV_C0:
- break;
- case NV_50:
- nv50_fbcon_accel_init(info);
- info->fbops = &nv50_fbcon_ops;
- break;
- default:
- nv04_fbcon_accel_init(info);
- info->fbops = &nv04_fbcon_ops;
- break;
- };
+ ret = -ENODEV;
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
}
nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +398,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
nouveau_fb->base.height,
nvbo->bo.offset, nvbo);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
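The fillrect/copyarea/imageblit wrappers above all share one shape: never touch the channel from interrupt context, only trylock its mutex (blocking here could deadlock against a thread that prints while holding it), and fall back to the cfb_* software paths whenever acceleration is unavailable or fails, so the console always works. A standalone sketch of that dispatch, substituting a pthread mutex for the channel mutex:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

    static int hw_render(void) { return 0; }          /* stub: accel worked */
    static void sw_render(void) { puts("sw path"); }  /* stub: cfb_* analog */

    static void fbcon_dispatch(void)
    {
            int ret = -ENODEV;      /* default: behave as if accel is absent */

            if (pthread_mutex_trylock(&chan_lock) == 0) {
                    ret = hw_render();
                    pthread_mutex_unlock(&chan_lock);
            }
            if (ret == 0)
                    return;         /* accelerated path succeeded */
            /* ret != -ENODEV would mean a GPU lockup in the driver */
            sw_render();            /* keep the console working regardless */
    }

    int main(void) { fbcon_dispatch(); return 0; }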
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37..b73c29f87fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@ struct nouveau_fbdev {
void nouveau_fbcon_restore(void);
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266..221b8462ea3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+ nouveau_private(dev)->card_type < NV_C0)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -64,6 +65,7 @@ nouveau_fence_del(struct kref *ref)
struct nouveau_fence *fence =
container_of(ref, struct nouveau_fence, refcount);
+ nouveau_channel_ref(NULL, &fence->channel);
kfree(fence);
}
@@ -76,14 +78,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
spin_lock(&chan->fence.lock);
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
+ /* Fetch the last sequence if the channel is still up and running */
+ if (likely(!list_empty(&chan->fence.pending))) {
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+ sequence = atomic_read(&chan->fence.last_sequence_irq);
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+ chan->fence.sequence_ack = sequence;
+ }
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
@@ -113,13 +118,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
if (!fence)
return -ENOMEM;
kref_init(&fence->refcount);
- fence->channel = chan;
+ nouveau_channel_ref(chan, &fence->channel);
if (emit)
ret = nouveau_fence_emit(fence);
if (ret)
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
}
@@ -127,7 +132,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
- return fence ? fence->channel : NULL;
+ return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}
int
@@ -135,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
- BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
- OUT_RING(chan, fence->sequence);
+ if (USE_REFCNT(dev)) {
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+ } else {
+ BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+ }
+ OUT_RING (chan, fence->sequence);
FIRE_RING(chan);
return 0;
@@ -182,7 +195,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
}
void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -192,7 +205,7 @@ nouveau_fence_unref(void **sync_obj)
}
void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
@@ -201,7 +214,7 @@ nouveau_fence_ref(void *sync_obj)
}
bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
}
int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
+ unsigned long sleep_time = jiffies + 1;
int ret = 0;
while (1) {
- if (nouveau_fence_signalled(sync_obj, sync_arg))
+ if (__nouveau_fence_signalled(sync_obj, sync_arg))
break;
if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy)
+ if (lazy && time_after_eq(jiffies, sleep_time))
schedule_timeout(1);
if (intr && signal_pending(current)) {
@@ -368,7 +382,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return 0;
}
@@ -380,33 +394,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
- int ret;
+ int ret = 0;
- if (likely(!fence || chan == wchan ||
- nouveau_fence_signalled(fence, NULL)))
- return 0;
+ if (likely(!chan || chan == wchan ||
+ nouveau_fence_signalled(fence)))
+ goto out;
sema = alloc_semaphore(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
- return nouveau_fence_wait(fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out;
+ }
+
+ /* Try to take chan's mutex; if we can't take it right away
+ * we have to fall back to software sync to prevent locking
+ * order issues.
+ */
+ if (!mutex_trylock(&chan->mutex)) {
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
if (ret)
- goto out;
+ goto out_unlock;
/* Signal the semaphore from chan */
ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+ mutex_unlock(&chan->mutex);
+out_unref:
kref_put(&sema->ref, free_semaphore);
+out:
+ if (chan)
+ nouveau_channel_put_unlocked(&chan);
return ret;
}
int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}
@@ -420,30 +450,27 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
int ret;
/* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
- ret = nouveau_ramht_insert(chan, NvSw, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
+ /* we leave subchannel empty for nvc0 */
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+ }
/* Create a DMA object for the shared cross-channel sync area. */
if (USE_SEMA(dev)) {
- struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+ struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
- mem->size << PAGE_SHIFT,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &obj);
+ mem->size, NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;
@@ -473,6 +500,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct nouveau_fence *tmp, *fence;
+ spin_lock(&chan->fence.lock);
+
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -482,6 +511,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
+
+ spin_unlock(&chan->fence.lock);
}
int
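Two behavioural points in the fence code above: __nouveau_fence_wait() now spins at full speed for roughly the first jiffy (sleep_time = jiffies + 1) before it starts yielding, which keeps short waits cheap, and nouveau_fence_sync() only trylocks the signalling channel's mutex, falling back to a software wait rather than risking a lock-order inversion. A standalone sketch of the spin-then-sleep wait:

    #include <stdbool.h>
    #include <time.h>

    /* Spin for a short grace period, then sleep one tick per iteration;
     * give up at the overall timeout. */
    static int wait_for(bool (*signalled)(void), int spin_iters, int max_iters)
    {
            for (int i = 0; i < max_iters; i++) {
                    if (signalled())
                            return 0;
                    if (i >= spin_iters)    /* past the grace period */
                            nanosleep(&(struct timespec){ .tv_nsec = 1000000 },
                                      NULL);
            }
            return -1;      /* -EBUSY in the driver */
    }

    static bool done_now(void) { return true; }
    int main(void) { return wait_for(done_now, 100, 3000); }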
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c..506c508b7ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;
- if (unlikely(nvbo->cpu_filp))
- ttm_bo_synccpu_write_release(bo);
-
if (unlikely(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
return 0;
}
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->card_type >= NV_50) {
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7000:
- case 0x7400:
- case 0x7a00:
- case 0xe000:
- return true;
- }
- } else {
- if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
- return true;
- }
-
- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
- return false;
-}
-
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->channel_hint) {
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
- file_priv, chan);
- }
-
if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+ if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
+ }
+
+ if (req->channel_hint) {
+ chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ }
ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
+ if (chan)
+ nouveau_channel_put(&chan);
if (ret)
return ret;
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
- if (likely(fence)) {
- struct nouveau_fence *prev_fence;
-
- spin_lock(&nvbo->bo.lock);
- prev_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = nouveau_fence_ref(fence);
- spin_unlock(&nvbo->bo.lock);
- nouveau_fence_unref((void *)&prev_fence);
- }
+
+ nouveau_bo_fence(nvbo, fence);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ retry:
return -EINVAL;
}
- ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+ ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (ret == -EAGAIN)
- ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+ if (unlikely(ret == -EAGAIN))
+ ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
drm_gem_object_unreference_unlocked(gem);
- if (ret) {
- NV_ERROR(dev, "fail reserve\n");
+ if (unlikely(ret)) {
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail reserve\n");
return ret;
}
goto retry;
@@ -331,25 +301,6 @@ retry:
validate_fini(op, NULL);
return -EINVAL;
}
-
- if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
- validate_fini(op, NULL);
-
- if (nvbo->cpu_filp == file_priv) {
- NV_ERROR(dev, "bo %p mapped by process trying "
- "to validate it!\n", nvbo);
- return -EINVAL;
- }
-
- mutex_unlock(&drm_global_mutex);
- ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- mutex_lock(&drm_global_mutex);
- if (ret) {
- NV_ERROR(dev, "fail wait_cpu\n");
- return ret;
- }
- goto retry;
- }
}
return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
}
nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
- NV_ERROR(dev, "fail ttm_validate\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
}
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
- NV_ERROR(dev, "validate_init\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate_init\n");
return ret;
}
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate vram_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate gart_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate both_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
data |= r->vor;
}
- spin_lock(&nvbo->bo.lock);
+ spin_lock(&nvbo->bo.bdev->fence_lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
break;
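
The per-buffer bo.lock is gone; fence state is now serialized by TTM's device-wide fence_lock, a substitution that recurs through the rest of this patch. The locking rule it establishes, in sketch form:

    /* Any access to bo->sync_obj, including ttm_bo_wait(), now runs
     * under the per-device fence lock instead of a per-object one: */
    spin_lock(&bo->bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, false, no_wait);
    spin_unlock(&bo->bdev->fence_lock);
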
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
req->vram_available = dev_priv->fb_aper_free;
req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
- if (IS_ERR(push))
+ if (IS_ERR(push)) {
+ nouveau_channel_put(&chan);
return PTR_ERR(push);
+ }
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
+ nouveau_channel_put(&chan);
return PTR_ERR(bo);
}
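
With the channel reference now taken before the size checks, every early return has to pair with nouveau_channel_put(), which is exactly what the hunks above add. An equivalent (hypothetical) shape with a single exit label, shown only to make the invariant explicit:

    if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
            ret = -EINVAL;
            goto out_put;           /* the reference must still be dropped */
    }
    /* ... remaining checks, copies and submission ... */
    out_put:
            nouveau_channel_put(&chan);
            return ret;
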
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
- NV_ERROR(dev, "validate: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate: %d\n", ret);
goto out;
}
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
+ nouveau_fence_unref(&fence);
kfree(bo);
kfree(push);
@@ -750,6 +714,7 @@ out_next:
req->suffix1 = 0x00000000;
}
+ nouveau_channel_put(&chan);
return ret;
}
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (nvbo->cpu_filp) {
- if (nvbo->cpu_filp == file_priv)
- goto out;
-
- ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
- if (ret)
- goto out;
- }
-
- if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
- spin_unlock(&nvbo->bo.lock);
- } else {
- ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
- if (ret == 0)
- nvbo->cpu_filp = file_priv;
- }
-
-out:
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -809,26 +757,7 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_gem_cpu_prep *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- if (nvbo->cpu_filp != file_priv)
- goto out;
- nvbo->cpu_filp = NULL;
-
- ttm_bo_synccpu_write_release(&nvbo->bo);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
+ return 0;
}
int
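
CPU_PREP collapses into a plain fence wait under the new fence_lock, and CPU_FINI keeps no per-file state to tear down, so it degenerates to a stub retained for ABI compatibility. A hedged user-space view of the pair (the flag name comes from the code removed above; drmCommandWrite() is libdrm's generic ioctl wrapper):

    struct drm_nouveau_gem_cpu_prep req = {
            .handle = bo_handle,
            .flags  = NOUVEAU_GEM_CPU_PREP_NOBLOCK,  /* poll, don't sleep */
    };

    /* returns once the GPU is done with the buffer (or -EBUSY when
     * polling); the matching CPU_FINI call is now a no-op */
    ret = drmCommandWrite(fd, DRM_NOUVEAU_GEM_CPU_PREP, &req, sizeof(req));
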
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c41..053edf9d2f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
- if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+ if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
else
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (dev_priv->card_type == NV_10) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
- /* Setting 1 on this value gives you interrupts for every vblank period. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+ /* Enable vblank interrupts. */
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+ (dev->vblank_enabled[head] ? 1 : 0));
NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
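
Rather than forcing CRTC interrupts off on every state load, the register is now programmed from dev->vblank_enabled[], the DRM core's per-CRTC vblank bookkeeping, so an active vblank interrupt survives a modeset. A hedged sketch of the consumer side of that bookkeeping:

    /* drm_vblank_get()/drm_vblank_put() maintain dev->vblank_enabled[];
     * a waiter in the core or driver looks roughly like this: */
    if (drm_vblank_get(dev, head) == 0) {
            /* ... the IRQ handler calls drm_handle_vblank(dev, head) ... */
            drm_vblank_put(dev, head);
    }
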
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d6..2ba7265bc96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev)
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (dev_priv->card_type >= NV_50) {
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
- INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
- spin_lock_init(&dev_priv->hpd_state.lock);
- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
- }
+ INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
nouveau_irq_postinstall(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
/* Master enable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+
return 0;
}
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_pgraph_object_method *grm;
- struct nouveau_pgraph_object_class *grc;
-
- grc = dev_priv->engine.graph.grclass;
- while (grc->id) {
- if (grc->id == class)
- break;
- grc++;
- }
-
- if (grc->id != class || !grc->methods)
- return -ENOENT;
-
- grm = grc->methods;
- while (grm->id) {
- if (grm->id == mthd)
- return grm->exec(chan, class, mthd, data);
- grm++;
- }
-
- return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
- struct drm_device *dev = chan->dev;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
-
- if (mthd == 0x0000) {
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return false;
-
- if (gpuobj->engine != NVOBJ_ENGINE_SW)
- return false;
-
- chan->sw_subchannel[subc] = gpuobj->class;
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
- NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
- return true;
- }
-
- /* hw object */
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
- return false;
-
- if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
- return false;
-
- return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t status, reassign;
- int cnt = 0;
-
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- struct nouveau_channel *chan = NULL;
- uint32_t chid, get;
-
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- chid = engine->fifo.channel_id(dev);
- if (chid >= 0 && chid < engine->fifo.channels)
- chan = dev_priv->fifos[chid];
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
- if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(dev,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
-
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
- }
- } else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
-
- if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
- }
-
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
-
- if (status & NV_PFIFO_INTR_SEMAPHORE) {
- uint32_t sem;
-
- status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
-
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- if (dev_priv->card_type == NV_50) {
- if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
- status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
- }
- }
-
- if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
- status = 0;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
- }
-
- if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
- uint32_t mask;
- const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
- { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
- { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
- { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
- { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
- { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
- { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
- { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
- { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
- { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
- { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
- { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
- { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
- const struct nouveau_bitfield_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- uint32_t mask = namelist[i].mask;
- if (value & mask) {
- printk(" %s", namelist[i].name);
- value &= ~mask;
- }
- }
- if (value)
- printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
- nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
- uint32_t value;
- const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
- const struct nouveau_enum_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- if (value == namelist[i].value) {
- printk("%s", namelist[i].name);
- return;
- }
- }
- printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
- nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
+ unsigned long flags;
+ u32 stat;
int i;
- if (dev_priv->card_type < NV_40)
- return dev_priv->engine.fifo.channels;
- else
- if (dev_priv->card_type < NV_50) {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin_grctx)
- continue;
-
- if (inst == chan->ramin_grctx->pinst)
- break;
- }
- } else {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- }
-
-
- return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- int channel;
-
- if (dev_priv->card_type < NV_10)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
- else
- if (dev_priv->card_type < NV_40)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- else
- channel = nouveau_graph_chid_from_grctx(dev);
-
- if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
- NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
- return -EINVAL;
- }
-
- *channel_ret = channel;
- return 0;
-}
-
-struct nouveau_pgraph_trap {
- int channel;
- int class;
- int subc, mthd, size;
- uint32_t data, data2;
- uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t address;
-
- trap->nsource = trap->nstatus = 0;
- if (dev_priv->card_type < NV_50) {
- trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- }
-
- if (nouveau_graph_trapped_channel(dev, &trap->channel))
- trap->channel = -1;
- address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
- trap->mthd = address & 0x1FFC;
- trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- if (dev_priv->card_type < NV_10) {
- trap->subc = (address >> 13) & 0x7;
- } else {
- trap->subc = (address >> 16) & 0x7;
- trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
- }
-
- if (dev_priv->card_type < NV_10)
- trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
- else if (dev_priv->card_type < NV_40)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
- else if (dev_priv->card_type < NV_50)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
- else
- trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
- if (dev_priv->card_type < NV_50) {
- NV_INFO(dev, "%s - nSource:", id);
- nouveau_print_bitfield_names(nsource, nsource_names);
- printk(", nStatus:");
- if (dev_priv->card_type < NV_10)
- nouveau_print_bitfield_names(nstatus, nstatus_names);
- else
- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
- printk("\n");
- }
-
- NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
- "Data 0x%08x:0x%08x\n",
- id, trap->channel, trap->subc,
- trap->class, trap->mthd,
- trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (trap->channel < 0 ||
- trap->channel >= dev_priv->engine.fifo.channels ||
- !dev_priv->fifos[trap->channel])
- return -ENODEV;
-
- return nouveau_call_method(dev_priv->fifos[trap->channel],
- trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
+ stat = nv_rd32(dev, NV03_PMC_INTR_0);
+ if (!stat)
+ return IRQ_NONE;
- nouveau_graph_trap_info(dev, &trap);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ for (i = 0; i < 32 && stat; i++) {
+ if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+ continue;
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else {
- unhandled = 1;
+ dev_priv->irq_handler[i](dev);
+ stat &= ~(1 << i);
}
- if (unhandled)
- nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
-
- nouveau_graph_trap_info(dev, &trap);
- trap.nsource = nsource;
-
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- uint32_t v = nv_rd32(dev, 0x402000);
- nv_wr32(dev, 0x402000, v);
-
- /* dump the error anyway for now: it's useful for
- Gallium development */
- unhandled = 1;
- } else {
- unhandled = 1;
- }
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+ if (stat && nouveau_ratelimit())
+ NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
+ return IRQ_HANDLED;
}
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+int
+nouveau_irq_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t chid;
-
- chid = engine->fifo.channel_id(dev);
- NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_graph_context_switch(dev);
- break;
- case NV_10:
- nv10_graph_context_switch(dev);
- break;
- default:
- NV_ERROR(dev, "Context switch not implemented\n");
- break;
- }
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
- if (status & NV_PGRAPH_INTR_NOTIFY) {
- nouveau_pgraph_intr_notify(dev, nsource);
-
- status &= ~NV_PGRAPH_INTR_NOTIFY;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
- }
-
- if (status & NV_PGRAPH_INTR_ERROR) {
- nouveau_pgraph_intr_error(dev, nsource);
+ int ret;
- status &= ~NV_PGRAPH_INTR_ERROR;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+ ret = pci_enable_msi(dev->pdev);
+ if (ret == 0) {
+ NV_INFO(dev, "enabled MSI\n");
+ dev_priv->msi_enabled = true;
}
-
- if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
- nouveau_pgraph_intr_context_switch(dev);
- }
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
- nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
}
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
- uint32_t addr, mp10, status, pc, oplow, ophigh;
- int i;
- int mps = 0;
- for (i = 0; i < 4; i++) {
- if (!(units & 1 << (i+24)))
- continue;
- if (dev_priv->chipset < 0xa0)
- addr = 0x408200 + (tpid << 12) + (i << 7);
- else
- addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(dev, addr + 0x10);
- status = nv_rd32(dev, addr + 0x14);
- if (!status)
- continue;
- if (display) {
- nv_rd32(dev, addr + 0x20);
- pc = nv_rd32(dev, addr + 0x24);
- oplow = nv_rd32(dev, addr + 0x70);
- ophigh= nv_rd32(dev, addr + 0x74);
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_print_enum_names(status,
- nv50_mp_exec_error_names);
- printk(" at %06x warp %d, opcode %08x %08x\n",
- pc&0xffffff, pc >> 24,
- oplow, ophigh);
- }
- nv_wr32(dev, addr + 0x10, mp10);
- nv_wr32(dev, addr + 0x14, 0);
- mps++;
- }
- if (!mps && display)
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
- "No MPs claiming errors?\n", tpid);
+ return drm_irq_install(dev);
}
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
- uint32_t ustatus_new, int display, const char *name)
+void
+nouveau_irq_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int tps = 0;
- uint32_t units = nv_rd32(dev, 0x1540);
- int i, r;
- uint32_t ustatus_addr, ustatus;
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
- if (dev_priv->chipset < 0xa0)
- ustatus_addr = ustatus_old + (i << 12);
- else
- ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
- if (!ustatus)
- continue;
- tps++;
- switch (type) {
- case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
- if (display) {
- NV_ERROR(dev, "magic set %d:\n", i);
- for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- }
- break;
- case 7: /* MP error */
- if (ustatus & 0x00010000) {
- nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
- }
- break;
- case 8: /* TPDMA error */
- {
- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
- }
- break;
- }
- if (ustatus) {
- if (display)
- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
- }
- nv_wr32(dev, ustatus_addr, 0xc0000000);
- }
-
- if (!tps && display)
- NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- uint32_t status = nv_rd32(dev, 0x400108);
- uint32_t ustatus;
- int display = nouveau_ratelimit();
-
-
- if (!status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
- NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
- }
-
- /* DISPATCH: Relays commands to other units and handles NOTIFY,
- * COND, QUERY. If you get a trap from it, the command is still stuck
- * in DISPATCH and you need to do something about it. */
- if (status & 0x001) {
- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
- }
-
- /* Known to be triggered by screwed up NOTIFY and COND... */
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x400808) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40080c);
- trap.data2 = nv_rd32(dev, 0x400810);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
- }
- nv_wr32(dev, 0x400808, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
- }
- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
- nv_wr32(dev, 0x400848, 0);
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x40084c) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40085c);
- trap.data2 = 0;
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
- }
- nv_wr32(dev, 0x40084c, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
- }
- ustatus &= ~0x00000002;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x001);
- status &= ~0x001;
- }
-
- /* TRAPs other than dispatch use the "normal" trap regs. */
- if (status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP", &trap);
- }
-
- /* M2MF: Memory to memory copy engine. */
- if (status & 0x002) {
- ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
- ustatus &= ~0x00000002;
- }
- if (ustatus & 0x00000004) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
- ustatus &= ~0x00000004;
- }
- NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x406804),
- nv_rd32(dev, 0x406808),
- nv_rd32(dev, 0x40680c),
- nv_rd32(dev, 0x406810));
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 2);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x002);
- status &= ~0x002;
- }
-
- /* VFETCH: Fetches data from vertex buffers. */
- if (status & 0x004) {
- ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x400c00),
- nv_rd32(dev, 0x400c08),
- nv_rd32(dev, 0x400c0c),
- nv_rd32(dev, 0x400c10));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x004);
- status &= ~0x004;
- }
-
- /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
- if (status & 0x008) {
- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x401804),
- nv_rd32(dev, 0x401808),
- nv_rd32(dev, 0x40180c),
- nv_rd32(dev, 0x401810));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 0x80);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x008);
- status &= ~0x008;
- }
-
- /* CCACHE: Handles code and c[] caches and fills them. */
- if (status & 0x010) {
- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x405800),
- nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808),
- nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810),
- nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x010);
- status &= ~0x010;
- }
-
- /* Unknown, not seen yet... 0x402000 is the only trap status reg
- * remaining, so try to handle it anyway. Perhaps related to that
- * unknown DMA slot on tesla? */
- if (status & 0x20) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modification on purpose */
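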
- }
-
- /* TEXTURE: CUDA texturing units */
- if (status & 0x040) {
- nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
- "PGRAPH_TRAP_TEXTURE");
- nv_wr32(dev, 0x400108, 0x040);
- status &= ~0x040;
- }
-
- /* MP: CUDA execution engines. */
- if (status & 0x080) {
- nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
- "PGRAPH_TRAP_MP");
- nv_wr32(dev, 0x400108, 0x080);
- status &= ~0x080;
- }
-
- /* TPDMA: Handles TP-initiated uncached memory accesses:
- * l[], g[], stack, 2d surfaces, render targets. */
- if (status & 0x100) {
- nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
- "PGRAPH_TRAP_TPDMA");
- nv_wr32(dev, 0x400108, 0x100);
- status &= ~0x100;
- }
-
- if (status) {
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
- status);
- nv_wr32(dev, 0x400108, status);
- }
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
- { 4, "INVALID_VALUE" },
- { 5, "INVALID_ENUM" },
- { 8, "INVALID_OBJECT" },
- { 0xc, "INVALID_BITFIELD" },
- { 0x28, "MP_NO_REG_SPACE" },
- { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- /* NOTIFY: You've set a NOTIFY an a command and it's done. */
- if (status & 0x00000001) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_NOTIFY", &trap);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
-
- /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
- * when you write 0x200 to 0x50c0 method 0x31c. */
- if (status & 0x00000002) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_COMPUTE_QUERY", &trap);
- status &= ~0x00000002;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
- }
-
- /* Unknown, never seen: 0x4 */
-
- /* ILLEGAL_MTHD: You used a wrong method for this class. */
- if (status & 0x00000010) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_MTHD", &trap);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
-
- /* ILLEGAL_CLASS: You used a wrong class. */
- if (status & 0x00000020) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_CLASS", &trap);
- status &= ~0x00000020;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
- }
-
- /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
- if (status & 0x00000040) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DOUBLE_NOTIFY", &trap);
- status &= ~0x00000040;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
- }
-
- /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) &
- ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
-
- nv50_graph_context_switch(dev);
-
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
-
- /* BUFFER_NOTIFY: Your m2mf transfer finished */
- if (status & 0x00010000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_BUFFER_NOTIFY", &trap);
- status &= ~0x00010000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
- }
-
- /* DATA_ERROR: Invalid value for this method, or invalid
- * state in current PGRAPH context for this operation */
- if (status & 0x00100000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit()) {
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DATA_ERROR", &trap);
- NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
- nouveau_print_enum_names(nv_rd32(dev, 0x400110),
- nv50_data_error_names);
- printk("\n");
- }
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
- /* TRAP: Something bad happened in the middle of command
- * execution. Has a billion types, subtypes, and even
- * subsubtypes. */
- if (status & 0x00200000) {
- nv50_pgraph_trap_handler(dev);
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
-
- /* Unknown, never seen: 0x00400000 */
-
- /* SINGLE_STEP: Happens on every method if you turned on
- * single stepping in 40008c */
- if (status & 0x01000000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_SINGLE_STEP", &trap);
- status &= ~0x01000000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
- }
-
- /* 0x02000000 happens when you pause a ctxprog...
- * but the only way this can happen that I know is by
- * poking the relevant MMIO register, and we don't
- * do that. */
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
- status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- {
- const int isb = (1 << 16) | (1 << 0);
-
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500,
- nv_rd32(dev, 0x400500) | isb);
- }
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
- if (nv_rd32(dev, 0x400824) & (1 << 31))
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ drm_irq_uninstall(dev);
+ if (dev_priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
}
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+ void (*handler)(struct drm_device *))
{
- if (crtc & 1)
- nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- if (crtc & 2)
- nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ dev_priv->irq_handler[status_bit] = handler;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
- struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status;
unsigned long flags;
- status = nv_rd32(dev, NV03_PMC_INTR_0);
- if (!status)
- return IRQ_NONE;
-
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
- nouveau_fifo_irq_handler(dev);
- status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
- if (dev_priv->card_type >= NV_50)
- nv50_pgraph_irq_handler(dev);
- else
- nouveau_pgraph_irq_handler(dev);
-
- status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
- nouveau_crtc_irq_handler(dev, (status>>24)&3);
- status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
- }
-
- if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING)) {
- nv50_display_irq_handler(dev);
- status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING);
- }
-
- if (status)
- NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+ dev_priv->irq_handler[status_bit] = NULL;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- return IRQ_HANDLED;
}
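
The monolithic PMC handler is reduced to a dispatch loop over a 32-entry table indexed by PMC status bit, with each engine installing its own handler through nouveau_irq_register(). A hedged usage sketch (bit 12 is PGRAPH's pending bit, NV_PMC_INTR_0_PGRAPH_PENDING; the nvxx_* names are illustrative):

    static void
    nvxx_graph_isr(struct drm_device *dev)
    {
            /* read NV03_PGRAPH_INTR, service and acknowledge each source */
    }

    int
    nvxx_graph_init(struct drm_device *dev)
    {
            nouveau_irq_register(dev, 12, nvxx_graph_isr);
            return 0;
    }

    void
    nvxx_graph_takedown(struct drm_device *dev)
    {
            nouveau_irq_unregister(dev, 12);
    }
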
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b4..69044eb104b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
/*
* NV10-NV40 tiling helpers
*/
static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+ int i = tile - dev_priv->tile.reg;
+ unsigned long save;
- tile->addr = addr;
- tile->size = size;
- tile->used = !!pitch;
- nouveau_fence_unref((void **)&tile->fence);
+ nouveau_fence_unref(&tile->fence);
+ if (tile->pitch)
+ pfb->free_tile_region(dev, i);
+
+ if (pitch)
+ pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, save);
pfifo->reassign(dev, false);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
- pgraph->set_region_tiling(dev, i, addr, size, pitch);
- pfb->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_tile_region(dev, i);
+ pgraph->set_tile_region(dev, i);
pfifo->cache_pull(dev, true);
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
- uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *found = NULL;
- unsigned long i, flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- for (i = 0; i < pfb->num_tiles; i++) {
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
- if (tile->used)
- /* Tile region in use. */
- continue;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (tile->fence &&
- !nouveau_fence_signalled(tile->fence, NULL))
- /* Pending tile region. */
- continue;
-
- if (max(tile->addr, addr) <
- min(tile->addr + tile->size, addr + size))
- /* Kill an intersecting tile region. */
- nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
- if (pitch && !found) {
- /* Free tile region. */
- nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = tile;
- }
- }
+ spin_lock(&dev_priv->tile.lock);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ if (!tile->used &&
+ (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ tile->used = true;
+ else
+ tile = NULL;
- return found;
+ spin_unlock(&dev_priv->tile.lock);
+ return tile;
}
void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence)
-{
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
- tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
- uint32_t flags, uint64_t phys)
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned block;
- int i;
- virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
- size = (size >> 16) << 1;
-
- phys |= ((uint64_t)flags << 32);
- phys |= 1;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- phys |= 0x30;
- }
-
- while (size) {
- unsigned offset_h = upper_32_bits(phys);
- unsigned offset_l = lower_32_bits(phys);
- unsigned pte, end;
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 1);
- if (size >= block && !(virt & (block - 1)))
- break;
+ if (tile) {
+ spin_lock(&dev_priv->tile.lock);
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
}
- offset_l |= (i << 7);
-
- phys += block << 15;
- size -= block;
-
- while (block) {
- pgt = dev_priv->vm_vram_pt[virt >> 14];
- pte = virt & 0x3ffe;
-
- end = pte + block;
- if (end > 16384)
- end = 16384;
- block -= (end - pte);
- virt += (end - pte);
-
- while (pte < end) {
- nv_wo32(pgt, (pte * 4) + 0, offset_l);
- nv_wo32(pgt, (pte * 4) + 4, offset_h);
- pte += 2;
- }
- }
- }
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
- return 0;
+ tile->used = false;
+ spin_unlock(&dev_priv->tile.lock);
+ }
}
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned pages, pte, end;
-
- virt -= dev_priv->vm_vram_base;
- pages = (size >> 16) << 1;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile, *found = NULL;
+ int i;
- while (pages) {
- pgt = dev_priv->vm_vram_pt[virt >> 29];
- pte = (virt & 0x1ffe0000ULL) >> 15;
+ for (i = 0; i < pfb->num_tiles; i++) {
+ tile = nv10_mem_get_tile_region(dev, i);
- end = pte + pages;
- if (end > 16384)
- end = 16384;
- pages -= (end - pte);
- virt += (end - pte) << 15;
+ if (pitch && !found) {
+ found = tile;
+ continue;
- while (pte < end) {
- nv_wo32(pgt, (pte * 4), 0);
- pte++;
+ } else if (tile && tile->pitch) {
+ /* Kill an unused tile region. */
+ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
}
+
+ nv10_mem_put_tile_region(dev, tile, NULL);
}
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
+ if (found)
+ nv10_mem_update_tile_region(dev, found, addr, size,
+ pitch, flags);
+ return found;
}
/*
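
Tile regions now follow an explicit get/put lifecycle guarded by the new tile.lock, with teardown deferred behind a fence. A caller-side sketch (a NULL fence releases the region immediately):

    struct nouveau_tile_reg *tile;

    tile = nv10_mem_set_tiling(dev, addr, size, pitch, flags);
    if (tile) {
            /* ... queue GPU work that depends on the tiling setup ... */
            nv10_mem_put_tile_region(dev, tile, fence);
            /* the region becomes reusable once the fence signals */
    }
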
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru;
-
- r0 = nv_rd32(dev, 0x100200);
- r4 = nv_rd32(dev, 0x100204);
- rt = nv_rd32(dev, 0x100250);
- ru = nv_rd32(dev, 0x001540);
- NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = ((r4 & 0x01000000) ? 8 : 4);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != dev_priv->vram_size) {
- NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
- (u32)(dev_priv->vram_size >> 20));
- NV_WARN(dev, "we calculated %dMiB VRAM\n",
- (u32)(predicted >> 20));
- }
-
- dev_priv->vram_rblock_size = rowsize >> 12;
- if (rt & 1)
- dev_priv->vram_rblock_size *= 3;
-
- NV_DEBUG(dev, "rblock %lld bytes\n",
- (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* To our knowledge, there's no large scale reordering of pages
- * that occurs on IGP chipsets.
- */
- dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- } else
- if (dev_priv->card_type < NV_C0) {
- dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
- dev_priv->vram_size &= 0xffffffff00ll;
-
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
- dev_priv->vram_sys_base <<= 12;
- nvaa_vram_preinit(dev);
- break;
- default:
- nv50_vram_preinit(dev);
- break;
- }
- } else {
- dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
- dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- }
-
- NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
- if (dev_priv->vram_sys_base) {
- NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
- dev_priv->vram_sys_base);
}
if (dev_priv->vram_size)
@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
+
+ return false;
+}
+
#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
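
nouveau_mem_flags_valid() above is the pre-NV50 implementation of the new engine.vram.flags_valid() hook consumed by nouveau_gem_ioctl_new() at the top of this patch; outside the NV50 family only untiled layouts are acceptable. The NV50 hook presumably keeps the layout whitelist deleted from nouveau_gem.c, along these lines:

    bool
    nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
    {
            /* reconstructed from the switch removed in nouveau_gem.c */
            switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
            case 0x0000:
            case 0x1800:
            case 0x2800:
            case 0x4800:
            case 0x7000:
            case 0x7400:
            case 0x7a00:
            case 0xe000:
                    return true;
            default:
                    return false;
            }
    }
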
@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (ret)
return ret;
- ret = nouveau_mem_detect(dev);
- if (ret)
- return ret;
-
dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_available_size = dev_priv->vram_size;
- dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
- dev_priv->fb_mappable_pages =
- pci_resource_len(dev->pdev, 1);
- dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
/* reserve space at end of VRAM for PRAMIN */
if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
else
dev_priv->ramin_rsvd_vram = (512 * 1024);
+ ret = dev_priv->engine.vram.init(dev);
+ if (ret)
+ return ret;
+
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ if (dev_priv->vram_sys_base) {
+ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ dev_priv->vram_sys_base);
+ }
+
+ dev_priv->fb_available_size = dev_priv->vram_size;
+ dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+ if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+ dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+ dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -799,3 +660,118 @@ nouveau_mem_timing_fini(struct drm_device *dev)
kfree(mem->timing);
}
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_mm *mm;
+ u32 b_size;
+ int ret;
+
+ p_size = (p_size << PAGE_SHIFT) >> 12;
+ b_size = dev_priv->vram_rblock_size >> 12;
+
+ ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ if (ret)
+ return ret;
+
+ man->priv = mm;
+ return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+ struct nouveau_mm *mm = man->priv;
+ int ret;
+
+ ret = nouveau_mm_fini(&mm);
+ if (ret)
+ return ret;
+
+ man->priv = NULL;
+ return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+
+ vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vram *node;
+ u32 size_nc = 0;
+ int ret;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+ size_nc = 1 << nvbo->vma.node->type;
+
+ ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0xff, &node);
+ if (ret)
+ return ret;
+
+ node->page_shift = 12;
+ if (nvbo->vma.node)
+ node->page_shift = nvbo->vma.node->type;
+
+ mem->mm_node = node;
+ mem->start = node->offset >> PAGE_SHIFT;
+ return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+ int i;
+
+ mutex_lock(&mm->mutex);
+ list_for_each_entry(r, &mm->nodes, nl_entry) {
+ printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+ prefix, r->free ? "free" : "used", r->type,
+ ((u64)r->offset << 12),
+ (((u64)r->offset + r->length) << 12));
+ total += r->length;
+ ttotal[r->type] += r->length;
+ if (r->free)
+ tfree[r->type] += r->length;
+ else
+ tused[r->type] += r->length;
+ }
+ mutex_unlock(&mm->mutex);
+
+ printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
+ for (i = 0; i < 3; i++) {
+ printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+ "used 0x%010llx, free 0x%010llx\n", prefix,
+ i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+ }
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+ nouveau_vram_manager_init,
+ nouveau_vram_manager_fini,
+ nouveau_vram_manager_new,
+ nouveau_vram_manager_del,
+ nouveau_vram_manager_debug
+};
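nouveau_vram_manager_init() above bridges two granularities: TTM hands it p_size in CPU pages, while nouveau_mm tracks offsets and lengths in 4KiB (1 << 12) units. A minimal sketch of the conversion, assuming 4KiB pages and an invented vram_rblock_size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4KiB CPU pages */

int main(void)
{
	uint64_t p_size = 262144;	/* pages: assumed 1GiB of VRAM */
	uint64_t rblock = 65536;	/* bytes: assumed vram_rblock_size */

	uint64_t mm_size  = (p_size << PAGE_SHIFT) >> 12; /* 4KiB units */
	uint64_t mm_block = rblock >> 12;                 /* 4KiB units */

	printf("heap: %llu units, block: %llu units\n",
	       (unsigned long long)mm_size, (unsigned long long)mm_block);
	return 0;
}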
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 00000000000..cdbb11eb701
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+ list_del(&a->nl_entry);
+ list_del(&a->fl_entry);
+ kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ b->offset = a->offset;
+ b->length = size;
+ b->free = a->free;
+ b->type = a->type;
+ a->offset += size;
+ a->length -= size;
+ list_add_tail(&b->nl_entry, &a->nl_entry);
+ if (b->free)
+ list_add_tail(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
+static struct nouveau_mm_node *
+nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+ struct nouveau_mm_node *prev, *next;
+
+ /* try to merge with free adjacent entries of same type */
+ prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.prev != &rmm->nodes) {
+ if (prev->free && prev->type == this->type) {
+ prev->length += this->length;
+ region_put(rmm, this);
+ this = prev;
+ }
+ }
+
+ next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.next != &rmm->nodes) {
+ if (next->free && next->type == this->type) {
+ next->offset = this->offset;
+ next->length += this->length;
+ region_put(rmm, this);
+ this = next;
+ }
+ }
+
+ return this;
+}
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+ u32 block_s, block_l;
+
+ this->free = true;
+ list_add(&this->fl_entry, &rmm->free);
+ this = nouveau_mm_merge(rmm, this);
+
+	/* any entirely free blocks now? we'll want to remove typing
+	 * on them so they can be used for any memory allocation
+	 */
+ block_s = roundup(this->offset, rmm->block_size);
+ if (block_s + rmm->block_size > this->offset + this->length)
+ return;
+
+ /* split off any still-typed region at the start */
+ if (block_s != this->offset) {
+ if (!region_split(rmm, this, block_s - this->offset))
+ return;
+ }
+
+ /* split off the soon-to-be-untyped block(s) */
+ block_l = rounddown(this->length, rmm->block_size);
+ if (block_l != this->length) {
+ this = region_split(rmm, this, block_l);
+ if (!this)
+ return;
+ }
+
+ /* mark as having no type, and retry merge with any adjacent
+ * untyped blocks
+ */
+ this->type = 0;
+ nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *this, *tmp, *next;
+ u32 splitoff, avail, alloc;
+
+ list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+ next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.next == &rmm->nodes)
+ next = NULL;
+
+ /* skip wrongly typed blocks */
+ if (this->type && this->type != type)
+ continue;
+
+ /* account for alignment */
+ splitoff = this->offset & (align - 1);
+ if (splitoff)
+ splitoff = align - splitoff;
+
+ if (this->length <= splitoff)
+ continue;
+
+ /* determine total memory available from this, and
+ * the next block (if appropriate)
+ */
+ avail = this->length;
+ if (next && next->free && (!next->type || next->type == type))
+ avail += next->length;
+
+ avail -= splitoff;
+
+ /* determine allocation size */
+ if (size_nc) {
+ alloc = min(avail, size);
+ alloc = rounddown(alloc, size_nc);
+ if (alloc == 0)
+ continue;
+ } else {
+ alloc = size;
+ if (avail < alloc)
+ continue;
+ }
+
+ /* untyped block, split off a chunk that's a multiple
+ * of block_size and type it
+ */
+ if (!this->type) {
+ u32 block = roundup(alloc + splitoff, rmm->block_size);
+ if (this->length < block)
+ continue;
+
+ this = region_split(rmm, this, block);
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ }
+
+ /* stealing memory from adjacent block */
+ if (alloc > this->length) {
+ u32 amount = alloc - (this->length - splitoff);
+
+ if (!next->type) {
+ amount = roundup(amount, rmm->block_size);
+
+ next = region_split(rmm, next, amount);
+ if (!next)
+ return -ENOMEM;
+
+ next->type = type;
+ }
+
+ this->length += amount;
+ next->offset += amount;
+ next->length -= amount;
+ if (!next->length) {
+ list_del(&next->nl_entry);
+ list_del(&next->fl_entry);
+ kfree(next);
+ }
+ }
+
+ if (splitoff) {
+ if (!region_split(rmm, this, splitoff))
+ return -ENOMEM;
+ }
+
+ this = region_split(rmm, this, alloc);
+ if (this == NULL)
+ return -ENOMEM;
+
+ this->free = false;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+ struct nouveau_mm *rmm;
+ struct nouveau_mm_node *heap;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return -ENOMEM;
+ heap->free = true;
+ heap->offset = roundup(offset, block);
+ heap->length = rounddown(offset + length, block) - heap->offset;
+
+ rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+ if (!rmm) {
+ kfree(heap);
+ return -ENOMEM;
+ }
+ rmm->block_size = block;
+ mutex_init(&rmm->mutex);
+ INIT_LIST_HEAD(&rmm->nodes);
+ INIT_LIST_HEAD(&rmm->free);
+ list_add(&heap->nl_entry, &rmm->nodes);
+ list_add(&heap->fl_entry, &rmm->free);
+
+ *prmm = rmm;
+ return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+ struct nouveau_mm *rmm = *prmm;
+ struct nouveau_mm_node *heap =
+ list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+ if (!list_is_singular(&rmm->nodes))
+ return -EBUSY;
+
+ kfree(heap);
+ kfree(rmm);
+ *prmm = NULL;
+ return 0;
+}
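One subtlety in nouveau_mm_get() above is the alignment handling: align is assumed to be a power of two, and splitoff is the number of leading units that must be shaved off a candidate block so the allocation starts aligned. A tiny standalone model with invented inputs:

#include <stdio.h>
#include <stdint.h>

/* leading units to discard so the allocation starts "align"-aligned;
 * align must be a power of two, as in nouveau_mm_get() */
static uint32_t splitoff(uint32_t offset, uint32_t align)
{
	uint32_t s = offset & (align - 1);
	return s ? align - s : 0;
}

int main(void)
{
	printf("splitoff(5, 4) = %u\n", splitoff(5, 4));	/* 3 */
	printf("splitoff(8, 4) = %u\n", splitoff(8, 4));	/* 0 */
	return 0;
}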
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 00000000000..af384493303
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+ struct list_head nl_entry;
+ struct list_head fl_entry;
+ struct list_head rl_entry;
+
+ bool free;
+ int type;
+
+ u32 offset;
+ u32 length;
+};
+
+struct nouveau_mm {
+ struct list_head nodes;
+ struct list_head free;
+
+ struct mutex mutex;
+
+ u32 block_size;
+};
+
+int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_pre(struct nouveau_mm *);
+int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int nv50_vram_init(struct drm_device *);
+int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+ u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int nvc0_vram_init(struct drm_device *);
+int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
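Taken together, the declarations above give the allocator a four-call lifecycle. The fragment below is a hypothetical usage sketch built only from those declarations; the unit counts, type value and alignment are invented for illustration, not taken from the driver.

/* hypothetical lifecycle sketch; sizes, type and alignment invented */
static int nouveau_mm_example(void)
{
	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	/* heap of 0x100000 units, typed in 0x100-unit blocks */
	ret = nouveau_mm_init(&mm, 0, 0x100000, 0x100);
	if (ret)
		return ret;

	/* type 1, 0x800 units, no non-contig minimum, 0x10-unit alignment */
	ret = nouveau_mm_get(mm, 1, 0x800, 0, 0x10, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);

	/* returns -EBUSY while any node is still outstanding */
	return nouveau_mm_fini(&mm);
}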
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658..fe29d604b82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int size, uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
- target = NV_DMA_TARGET_VIDMEM;
- } else
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
- dev_priv->card_type < NV_50) {
- ret = nouveau_sgdma_get_page(dev, offset, &offset);
- if (ret)
- return ret;
- target = NV_DMA_TARGET_PCI;
- } else {
- target = NV_DMA_TARGET_AGP;
- if (dev_priv->card_type >= NV_50)
- offset += dev_priv->vm_gart_base;
- }
- } else {
- NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
- chan->notifier_bo->bo.mem.mem_type);
- return -EINVAL;
- }
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
- mem->size, NV_DMA_ACCESS_RW, target,
+ mem->size, NV_MEM_ACCESS_RW, target,
&nobj);
if (ret) {
drm_mm_put_block(mem);
@@ -181,15 +164,20 @@ int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_notifierobj_alloc *na = data;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+ /* completely unnecessary for these chipsets... */
+ if (unlikely(dev_priv->card_type >= NV_C0))
+ return -EINVAL;
+
+ chan = nouveau_channel_get(dev, file_priv, na->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
- if (ret)
- return ret;
-
- return 0;
+ nouveau_channel_put(&chan);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02..30b6544467c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+ struct list_head head;
+ u32 mthd;
+ int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+ struct list_head head;
+ struct list_head methods;
+ u32 id;
+ u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+
+ oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+ if (!oc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&oc->methods);
+ oc->id = class;
+ oc->engine = engine;
+ list_add(&oc->head, &dev_priv->classes);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ return -EINVAL;
+
+found:
+ om = kzalloc(sizeof(*om), GFP_KERNEL);
+ if (!om)
+ return -ENOMEM;
+
+ om->mthd = mthd;
+ om->exec = exec;
+ list_add(&om->head, &oc->methods);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id != class)
+ continue;
+
+ list_for_each_entry(om, &oc->methods, head) {
+ if (om->mthd == mthd)
+ return om->exec(chan, class, mthd, data);
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+ chan = dev_priv->channels.ptr[chid];
+ if (chan)
+ ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
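The registration/dispatch scheme introduced above — classes that each carry a method list, both searched linearly at call time — can be modelled outside the kernel. The sketch below substitutes arrays for list_head and invents the class/method numbers; it illustrates the lookup shape only, not driver behaviour.

#include <stdio.h>
#include <errno.h>

typedef int (*mthd_exec)(int chan, unsigned cls, unsigned mthd, unsigned data);

struct method { unsigned mthd; mthd_exec exec; };
struct clsdesc { unsigned id; const struct method *methods; int nr; };

static int nop(int chan, unsigned cls, unsigned mthd, unsigned data)
{
	printf("ch%d: class 0x%04x mthd 0x%04x data 0x%08x\n",
	       chan, cls, mthd, data);
	return 0;
}

static const struct method sw_methods[] = { { 0x0150, nop } };
static const struct clsdesc classes[] = {
	{ 0x506e, sw_methods, 1 },	/* invented software class */
};

/* walk classes, then that class's methods, as mthd_call() does above */
static int mthd_call(int chan, unsigned cls, unsigned mthd, unsigned data)
{
	for (unsigned i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		if (classes[i].id != cls)
			continue;
		for (int j = 0; j < classes[i].nr; j++) {
			if (classes[i].methods[j].mthd == mthd)
				return classes[i].methods[j].exec(chan, cls,
								  mthd, data);
		}
	}
	return -ENOENT;
}

int main(void)
{
	return mthd_call(0, 0x506e, 0x0150, 0x1234) ? 1 : 0;
}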
/* NVidia uses context objects to drive drawing operations.
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct nouveau_gpuobj **gpuobj_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *gpuobj;
struct drm_mm_node *ramin = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
chan ? chan->id : -1, size, align, flags);
- if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
-
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
spin_unlock(&dev_priv->ramin_lock);
if (chan) {
- NV_DEBUG(dev, "channel heap\n");
-
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
-
if (!ramin) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
- } else {
- NV_DEBUG(dev, "global heap\n");
-
- /* allocate backing pages, sets vinst */
- ret = engine->instmem.populate(dev, gpuobj, &size);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
- }
-
- /* try and get aperture space */
- do {
- if (drm_mm_pre_get(&dev_priv->ramin_heap))
- return -ENOMEM;
- spin_lock(&dev_priv->ramin_lock);
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
- align, 0);
- if (ramin == NULL) {
- spin_unlock(&dev_priv->ramin_lock);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
-
- ramin = drm_mm_get_block_atomic(ramin, size, align);
- spin_unlock(&dev_priv->ramin_lock);
- } while (ramin == NULL);
-
- /* on nv50 it's ok to fail, we have a fallback path */
- if (!ramin && dev_priv->card_type < NV_50) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- }
+ gpuobj->pinst = chan->ramin->pinst;
+ if (gpuobj->pinst != ~0)
+ gpuobj->pinst += ramin->start;
- /* if we got a chunk of the aperture, map pages into it */
- gpuobj->im_pramin = ramin;
- if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
- ret = engine->instmem.bind(dev, gpuobj);
+ gpuobj->cinst = ramin->start;
+ gpuobj->vinst = ramin->start + chan->ramin->vinst;
+ gpuobj->node = ramin;
+ } else {
+ ret = instmem->get(gpuobj, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
- }
-
- /* calculate the various different addresses for the object */
- if (chan) {
- gpuobj->pinst = chan->ramin->pinst;
- if (gpuobj->pinst != ~0)
- gpuobj->pinst += gpuobj->im_pramin->start;
- if (dev_priv->card_type < NV_50) {
- gpuobj->cinst = gpuobj->pinst;
- } else {
- gpuobj->cinst = gpuobj->im_pramin->start;
- gpuobj->vinst = gpuobj->im_pramin->start +
- chan->ramin->vinst;
- }
- } else {
- if (gpuobj->im_pramin)
- gpuobj->pinst = gpuobj->im_pramin->start;
- else
+ ret = -ENOSYS;
+ if (!(flags & NVOBJ_FLAG_DONT_MAP))
+ ret = instmem->map(gpuobj);
+ if (ret)
gpuobj->pinst = ~0;
- gpuobj->cinst = 0xdeadbeef;
+
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
}
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- int i;
-
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev)
NV_DEBUG(dev, "\n");
INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ INIT_LIST_HEAD(&dev_priv->classes);
spin_lock_init(&dev_priv->ramin_lock);
dev_priv->ramin_base = ~0;
@@ -205,9 +252,20 @@ void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om, *tm;
+ struct nouveau_gpuobj_class *oc, *tc;
NV_DEBUG(dev, "\n");
+ list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+ list_for_each_entry_safe(om, tm, &oc->methods, head) {
+ list_del(&om->head);
+ kfree(om);
+ }
+ list_del(&oc->head);
+ kfree(oc);
+ }
+
BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref)
container_of(ref, struct nouveau_gpuobj, refcount);
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int i;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
- if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+ if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
- if (gpuobj->im_backing)
- engine->instmem.clear(dev, gpuobj);
+ if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+ if (gpuobj->node) {
+ instmem->unmap(gpuobj);
+ instmem->put(gpuobj);
+ }
+ } else {
+ if (gpuobj->node) {
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ spin_unlock(&dev_priv->ramin_lock);
+ }
+ }
spin_lock(&dev_priv->ramin_lock);
- if (gpuobj->im_pramin)
- drm_mm_put_block(gpuobj->im_pramin);
list_del(&gpuobj->list);
spin_unlock(&dev_priv->ramin_lock);
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
kref_init(&gpuobj->refcount);
gpuobj->size = size;
gpuobj->pinst = pinst;
- gpuobj->cinst = 0xdeadbeef;
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
gpuobj->vinst = vinst;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
The method below creates a DMA object in instance RAM and returns a handle
to it that can be used to set up context objects.
*/
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
- uint64_t offset, uint64_t size, int access,
- int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+ u64 base, u64 size, int target, int access,
+ u32 type, u32 comp)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- int ret;
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ u32 flags0;
- NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
- chan->id, class, offset, size);
- NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+ flags0 = (comp << 29) | (type << 22) | class;
+ flags0 |= 0x00100000;
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+ case NV_MEM_ACCESS_RW:
+ case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+ default:
+ break;
+ }
switch (target) {
- case NV_DMA_TARGET_AGP:
- offset += dev_priv->gart_info.aper_base;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
break;
+ case NV_MEM_TARGET_GART:
+ base += dev_priv->gart_info.aper_base;
default:
+ flags0 &= ~0x00100000;
break;
}
- ret = nouveau_gpuobj_new(dev, chan,
- nouveau_gpuobj_class_instmem_size(dev, class),
- 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
- return ret;
- }
+ /* convert to base + limit */
+ size = (base + size) - 1;
- if (dev_priv->card_type < NV_50) {
- uint32_t frame, adjust, pte_flags = 0;
-
- if (access != NV_DMA_ACCESS_RO)
- pte_flags |= (1<<1);
- adjust = offset & 0x00000fff;
- frame = offset & ~0x00000fff;
-
- nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
- (access << 14) | (target << 16) |
- class));
- nv_wo32(*gpuobj, 4, size - 1);
- nv_wo32(*gpuobj, 8, frame | pte_flags);
- nv_wo32(*gpuobj, 12, frame | pte_flags);
- } else {
- uint64_t limit = offset + size - 1;
- uint32_t flags0, flags5;
+ nv_wo32(obj, offset + 0x00, flags0);
+ nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+ nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+ nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+ upper_32_bits(base));
+ nv_wo32(obj, offset + 0x10, 0x00000000);
+ nv_wo32(obj, offset + 0x14, 0x00000000);
- if (target == NV_DMA_TARGET_VIDMEM) {
- flags0 = 0x00190000;
- flags5 = 0x00010000;
- } else {
- flags0 = 0x7fc00000;
- flags5 = 0x00080000;
- }
+ pinstmem->flush(obj->dev);
+}
- nv_wo32(*gpuobj, 0, flags0 | class);
- nv_wo32(*gpuobj, 4, lower_32_bits(limit));
- nv_wo32(*gpuobj, 8, lower_32_bits(offset));
- nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
- (upper_32_bits(offset) & 0xff));
- nv_wo32(*gpuobj, 20, flags5);
- }
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+ int target, int access, u32 type, u32 comp,
+ struct nouveau_gpuobj **pobj)
+{
+ struct drm_device *dev = chan->dev;
+ int ret;
- instmem->flush(dev);
+ ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+ if (ret)
+ return ret;
- (*gpuobj)->engine = NVOBJ_ENGINE_SW;
- (*gpuobj)->class = class;
+ nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+ access, type, comp);
return 0;
}
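nv50_gpuobj_dma_init() above converts size into an inclusive limit and splits the 40-bit base/limit pair across the object's words. A standalone model of just that packing, with an invented base and size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base = 0x0123456789ULL;	/* assumed 40-bit base */
	uint64_t size = 0x10000;		/* assumed size */
	uint64_t limit = base + size - 1;	/* inclusive, as above */

	uint32_t w04 = (uint32_t)limit;		/* lower_32_bits(limit) */
	uint32_t w08 = (uint32_t)base;		/* lower_32_bits(base) */
	uint32_t w0c = ((uint32_t)(limit >> 32) << 24) |
		       (uint32_t)(base >> 32);	/* packed upper bits */

	printf("0x04=0x%08x 0x08=0x%08x 0x0c=0x%08x\n", w04, w08, w0c);
	return 0;
}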
int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
- uint64_t offset, uint64_t size, int access,
- struct nouveau_gpuobj **gpuobj,
- uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+ u64 size, int access, int target,
+ struct nouveau_gpuobj **pobj)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj;
+ u32 flags0, flags2;
int ret;
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
- (dev_priv->card_type >= NV_50 &&
- dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- offset + dev_priv->vm_gart_base,
- size, access, NV_DMA_TARGET_AGP,
- gpuobj);
- if (o_ret)
- *o_ret = 0;
- } else
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
- if (offset & ~0xffffffffULL) {
- NV_ERROR(dev, "obj offset exceeds 32-bits\n");
- return -EINVAL;
+ if (dev_priv->card_type >= NV_50) {
+ u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+ u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+ return nv50_gpuobj_dma_new(chan, class, base, size,
+ target, access, type, comp, pobj);
+ }
+
+ if (target == NV_MEM_TARGET_GART) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ base += dev_priv->gart_info.aper_base;
+ } else
+ if (base != 0) {
+ base = nouveau_sgdma_get_physical(dev, base);
+ target = NV_MEM_TARGET_PCI;
+ } else {
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+ return 0;
}
- if (o_ret)
- *o_ret = (uint32_t)offset;
- ret = (*gpuobj != NULL) ? 0 : -EINVAL;
- } else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- return -EINVAL;
}
- return ret;
+ flags0 = class;
+ flags0 |= 0x00003000; /* PT present, PT linear */
+ flags2 = 0;
+
+ switch (target) {
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
+ break;
+ }
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ flags0 |= 0x00008000;
+ default:
+ flags2 |= 0x00000002;
+ break;
+ }
+
+ flags0 |= (base & 0x00000fff) << 20;
+ flags2 |= (base & 0xfffff000);
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, flags0);
+ nv_wo32(obj, 0x04, size - 1);
+ nv_wo32(obj, 0x08, flags2);
+ nv_wo32(obj, 0x0c, flags2);
+
+ obj->engine = NVOBJ_ENGINE_SW;
+ obj->class = class;
+ *pobj = obj;
+ return 0;
}
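For the pre-NV50 path that now lives inline above, the two context-DMA words are pure bit-packing. The standalone encoder below mirrors the switch logic with invented enum names and an example base; the magic constants are the ones the code above writes.

#include <stdio.h>
#include <stdint.h>

enum { ACCESS_RO, ACCESS_WO, ACCESS_RW };	/* invented names */
enum { TARGET_VRAM, TARGET_PCI, TARGET_PCI_NOSNOOP };

int main(void)
{
	uint32_t cls = 0x003d;		/* NV_CLASS_DMA_IN_MEMORY */
	uint64_t base = 0x12345678;	/* assumed base address */
	int target = TARGET_PCI, access = ACCESS_RW;

	uint32_t flags0 = cls | 0x00003000;	/* PT present, PT linear */
	uint32_t flags2 = 0;

	if (target == TARGET_PCI)
		flags0 |= 0x00020000;
	else if (target == TARGET_PCI_NOSNOOP)
		flags0 |= 0x00030000;

	if (access == ACCESS_RO) {
		flags0 |= 0x00004000;
	} else {	/* WO also sets the write-enable in flags2 */
		if (access == ACCESS_WO)
			flags0 |= 0x00008000;
		flags2 |= 0x00000002;
	}

	flags0 |= (uint32_t)(base & 0x00000fff) << 20;
	flags2 |= (uint32_t)(base & 0xfffff000);

	printf("flags0=0x%08x flags2=0x%08x\n", flags0, flags2);
	return 0;
}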
/* Context objects in the instance RAM have the following structure.
@@ -495,82 +598,130 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
entry[5]:
set to 0?
*/
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ gpuobj->dev = chan->dev;
+ gpuobj->engine = NVOBJ_ENGINE_SW;
+ gpuobj->class = class;
+ kref_init(&gpuobj->refcount);
+ gpuobj->cinst = 0x40;
+
+ spin_lock(&dev_priv->ramin_lock);
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj)
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+ struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+ return -EINVAL;
+
+found:
+ switch (oc->engine) {
+ case NVOBJ_ENGINE_SW:
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+ if (ret)
+ return ret;
+ goto insert;
+ }
+ break;
+ case NVOBJ_ENGINE_GR:
+ if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
+ (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
+ struct nouveau_pgraph_engine *pgraph =
+ &dev_priv->engine.graph;
+
+ ret = pgraph->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ case NVOBJ_ENGINE_CRYPT:
+ if (!chan->crypt_ctx) {
+ struct nouveau_crypt_engine *pcrypt =
+ &dev_priv->engine.crypt;
+
+ ret = pcrypt->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ }
+
+ /* we're done if this is Fermi */
+ if (dev_priv->card_type >= NV_C0)
+ return 0;
+
ret = nouveau_gpuobj_new(dev, chan,
nouveau_gpuobj_class_instmem_size(dev, class),
16,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
- gpuobj);
+ &gpuobj);
if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
return ret;
}
if (dev_priv->card_type >= NV_50) {
- nv_wo32(*gpuobj, 0, class);
- nv_wo32(*gpuobj, 20, 0x00010000);
+ nv_wo32(gpuobj, 0, class);
+ nv_wo32(gpuobj, 20, 0x00010000);
} else {
switch (class) {
case NV_CLASS_NULL:
- nv_wo32(*gpuobj, 0, 0x00001030);
- nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
+ nv_wo32(gpuobj, 0, 0x00001030);
+ nv_wo32(gpuobj, 4, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 8, 0x01000000);
+ nv_wo32(gpuobj, 8, 0x01000000);
#endif
} else {
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 0, class | 0x00080000);
+ nv_wo32(gpuobj, 0, class | 0x00080000);
#else
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#endif
}
}
}
dev_priv->engine.instmem.flush(dev);
- (*gpuobj)->engine = NVOBJ_ENGINE_GR;
- (*gpuobj)->class = class;
- return 0;
-}
+ gpuobj->engine = oc->engine;
+ gpuobj->class = oc->id;
-int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj_ret)
-{
- struct drm_nouveau_private *dev_priv;
- struct nouveau_gpuobj *gpuobj;
-
- if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
- dev_priv = chan->dev->dev_private;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
- *gpuobj_ret = gpuobj;
- return 0;
+insert:
+ ret = nouveau_ramht_insert(chan, handle, gpuobj);
+ if (ret)
+ NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return ret;
}
static int
@@ -585,7 +736,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
/* Base amount for object storage (4KiB enough?) */
- size = 0x1000;
+ size = 0x2000;
base = 0;
/* PGRAPH context */
@@ -624,12 +775,30 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+ if (dev_priv->card_type == NV_C0) {
+ struct nouveau_vm *vm = dev_priv->chan_vm;
+ struct nouveau_vm_pgd *vpgd;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+ &chan->ramin);
+ if (ret)
+ return ret;
+
+ nouveau_vm_ref(vm, &chan->vm, NULL);
+
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+ return 0;
+ }
+
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
if (ret) {
@@ -639,14 +808,12 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* NV50 VM
* - Allocate per-channel page-directory
- * - Map GART and VRAM into the channel's address space at the
- * locations determined during init.
+ * - Link with shared channel VM
*/
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->chan_vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
- u32 pde;
if (vm_pinst != ~0)
vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
0, &chan->vm_pd);
if (ret)
return ret;
- for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(chan->vm_pd, i + 0, 0x00000000);
- nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
- }
-
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
- &chan->vm_gart_pt);
- pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
- nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
- pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
- &chan->vm_vram_pt[i]);
-
- nv_wo32(chan->vm_pd, pde + 0,
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
- pde += 8;
- }
- instmem->flush(dev);
+ nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -700,9 +846,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* VRAM ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &vram);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -710,8 +855,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &vram);
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -728,21 +873,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &tt);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
- return ret;
- }
- } else
- if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &tt, NULL);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &tt);
} else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- ret = -EINVAL;
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_GART, &tt);
}
if (ret) {
@@ -763,21 +900,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramht)
- return;
-
nouveau_ramht_ref(NULL, &chan->ramht, chan);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
- if (!dev_priv->susres.ramin_copy)
- return -ENOMEM;
-
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
- return 0;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing)
+ if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
continue;
- gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
- if (!gpuobj->im_backing_suspend) {
+ gpuobj->suspend = vmalloc(gpuobj->size);
+ if (!gpuobj->suspend) {
nouveau_gpuobj_resume(dev);
return -ENOMEM;
}
for (i = 0; i < gpuobj->size; i += 4)
- gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+ gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
}
return 0;
}
void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
-
- if (dev_priv->card_type < NV_50) {
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
- return;
- }
-
- list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
- continue;
-
- vfree(gpuobj->im_backing_suspend);
- gpuobj->im_backing_suspend = NULL;
- }
-}
-
-void
nouveau_gpuobj_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
- nouveau_gpuobj_suspend_cleanup(dev);
- return;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
+ if (!gpuobj->suspend)
continue;
for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
- dev_priv->engine.instmem.flush(dev);
+ nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+ vfree(gpuobj->suspend);
+ gpuobj->suspend = NULL;
}
- nouveau_gpuobj_suspend_cleanup(dev);
+ dev_priv->engine.instmem.flush(dev);
}
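The simplified suspend/resume pair above shadows every global object word by word. A userspace round-trip model, with a plain array standing in for the object and malloc() standing in for vmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t vram[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* stand-in gpuobj */
	uint32_t *suspend;
	size_t i, size = sizeof(vram);

	suspend = malloc(size);			/* vmalloc() in the driver */
	if (!suspend)
		return 1;
	for (i = 0; i < size; i += 4)		/* nv_ro32() loop above */
		suspend[i / 4] = vram[i / 4];

	memset(vram, 0, size);			/* "power was lost" */

	for (i = 0; i < size; i += 4)		/* nv_wo32() loop above */
		vram[i / 4] = suspend[i / 4];
	free(suspend);

	printf("restored word 3 = %u\n", vram[3]);	/* 4 */
	return 0;
}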
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_pgraph_object_class *grc;
- struct nouveau_gpuobj *gr = NULL;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
if (init->handle == ~0)
return -EINVAL;
- grc = pgraph->grclass;
- while (grc->id) {
- if (grc->id == init->class)
- break;
- grc++;
- }
+ chan = nouveau_channel_get(dev, file_priv, init->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- if (!grc->id) {
- NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
- return -EPERM;
+ if (nouveau_ramht_find(chan, init->handle)) {
+ ret = -EEXIST;
+ goto out;
}
- if (nouveau_ramht_find(chan, init->handle))
- return -EEXIST;
-
- if (!grc->software)
- ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
- else
- ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+ ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
- return ret;
}
- ret = nouveau_ramht_insert(chan, init->handle, gr);
- nouveau_gpuobj_ref(NULL, &gr);
- if (ret) {
- NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- return ret;
- }
-
- return 0;
+out:
+ nouveau_channel_put(&chan);
+ return ret;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
+ int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- gpuobj = nouveau_ramht_find(chan, objfree->handle);
- if (!gpuobj)
- return -ENOENT;
+ /* Synchronize with the user channel */
+ nouveau_channel_idle(chan);
- nouveau_ramht_remove(chan, objfree->handle);
- return 0;
+ ret = nouveau_ramht_remove(chan, objfree->handle);
+ nouveau_channel_put(&chan);
+ return ret;
}
u32
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158f582..fb846a3fef1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -418,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
- ret = sysfs_create_group(&hwmon_dev->kobj,
- &hwmon_attrgroup);
+ ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
NV_ERROR(dev,
"Unable to create hwmon sysfs file: %d\n", ret);
@@ -446,6 +449,25 @@ nouveau_hwmon_fini(struct drm_device *dev)
#endif
}
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct drm_nouveau_private *dev_priv =
+ container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+ struct drm_device *dev = dev_priv->dev;
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, "ac_adapter") == 0) {
+ bool ac = power_supply_is_system_supplied();
+
+ NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+ }
+
+ return NOTIFY_OK;
+}
+#endif
+
int
nouveau_pm_init(struct drm_device *dev)
{
@@ -485,6 +507,10 @@ nouveau_pm_init(struct drm_device *dev)
nouveau_sysfs_init(dev);
nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+ pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+ register_acpi_notifier(&pm->acpi_nb);
+#endif
return 0;
}
@@ -503,6 +529,9 @@ nouveau_pm_fini(struct drm_device *dev)
nouveau_perf_fini(dev);
nouveau_volt_fini(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&pm->acpi_nb);
+#endif
nouveau_hwmon_fini(dev);
nouveau_sysfs_fini(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d8580927ca..bef3e691041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
- ctx = (gpuobj->cinst >> 4) |
+ ctx = (gpuobj->pinst >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | 2;
+ ctx = (gpuobj->cinst << 10) | chan->id;
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
@@ -214,18 +214,19 @@ out:
spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
-void
+int
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht_entry *entry;
entry = nouveau_ramht_remove_entry(chan, handle);
if (!entry)
- return;
+ return -ENOENT;
nouveau_ramht_remove_hash(chan, entry->handle);
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
kfree(entry);
+ return 0;
}
struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e1a8f..c82de98fee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
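With nouveau_ramht_remove() now returning int, callers can report a missing handle instead of ignoring it. A hypothetical call site (the handle value is invented):

int ret = nouveau_ramht_remove(chan, 0xbeef0001 /* assumed handle */);
if (ret)
	NV_ERROR(dev, "handle 0x%08x not in RAMHT: %d\n", 0xbeef0001, ret);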
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541ca9e..04e8fb79526 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
#define NV04_PFB_PRE 0x001002d4
# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i))
+# define NV20_PFB_ZCOMP_MODE_32 (4 << 24)
+# define NV20_PFB_ZCOMP_EN (1 << 31)
+# define NV25_PFB_ZCOMP_MODE_16 (1 << 20)
+# define NV25_PFB_ZCOMP_MODE_32 (2 << 20)
#define NV10_PFB_CLOSE_PAGE2 0x0010033c
#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI 2
-#define NV_DMA_TARGET_AGP 3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
#define NV_CLASS_DMA_TO_MEMORY 0x00000003
@@ -332,6 +326,7 @@
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
+# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
@@ -378,6 +373,7 @@
#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
-#define NV50_PDISPLAY_INTR_EN 0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_INTR_EN_0 0x00610028
+#define NV50_PDISPLAY_INTR_EN_1 0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040
#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c)
#define NV50_PDISPLAY_CURSOR 0x00610270
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
-#define NV50_PDISPLAY_CTRL_STATE 0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
-#define NV50_PDISPLAY_CTRL_VAL 0x00610304
-#define NV50_PDISPLAY_UNK_380 0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
-#define NV50_PDISPLAY_UNK_388 0x00610388
-#define NV50_PDISPLAY_UNK_38C 0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL 0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001
+#define NV50_PDISPLAY_PIO_DATA 0x00610304
#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
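The renamed EVO register macros above are plain strided lookups: channel i's control registers sit 0x10 bytes apart, the trap registers 0x08 apart. A standalone check of the arithmetic:

#include <stdio.h>

#define NV50_PDISPLAY_EVO_CTRL(i)     ((i) * 0x10 + 0x00610200)
#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("EVO_CTRL(%d) = 0x%08x\n", i, NV50_PDISPLAY_EVO_CTRL(i));
	printf("TRAPPED_ADDR(1) = 0x%08x\n", NV50_PDISPLAY_TRAPPED_ADDR(1));
	return 0;
}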
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac9700703..9a250eb5309 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
dma_addr_t *pages;
unsigned nr_pages;
- unsigned pte_start;
+ u64 offset;
bool bound;
};
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
}
}
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
- if (dev_priv->card_type < NV_50)
- return pte + 2;
-
- return pte << 1;
-}
-
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
- nvbe->pte_start = pte;
+ nvbe->offset = mem->start << PAGE_SHIFT;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
uint32_t offset_l = lower_32_bits(dma_offset);
- uint32_t offset_h = upper_32_bits(dma_offset);
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
- nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
- pte += 2;
- }
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- }
nvbe->bound = true;
return 0;
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
if (!nvbe->bound)
return 0;
- pte = nvbe->pte_start;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
- nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
- pte += 2;
- }
-
- dma_offset += NV_CTXDMA_PAGE_SIZE;
- }
- }
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
}
}
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ nvbe->offset = mem->start << PAGE_SHIFT;
+
+ nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ if (!nvbe->bound)
+ return 0;
+
+ nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT);
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nouveau_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
.destroy = nouveau_sgdma_destroy
};
+static struct ttm_backend_func nv50_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv50_sgdma_bind,
+ .unbind = nv50_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_sgdma_be *nvbe;
- if (!dev_priv->gart_info.sg_ctxdma)
- return NULL;
-
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
nvbe->dev = dev;
- nvbe->backend.func = &nouveau_sgdma_backend;
-
+ if (dev_priv->card_type < NV_50)
+ nvbe->backend.func = &nouveau_sgdma_backend;
+ else
+ nvbe->backend.func = &nv50_sgdma_backend;
return &nvbe->backend;
}
@@ -218,7 +209,6 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
- } else {
- /* 1 entire VM page table */
- aper_size = (512 * 1024 * 1024);
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
- return ret;
- }
-
- dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
- if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
- dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -EFAULT;
- }
+ ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- if (dev_priv->card_type < NV_50) {
- /* special case, allocated from global instmem heap so
- * cinst is invalid, we use it on all channels though so
- * cinst needs to be valid, set it the same as pinst
- */
- gpuobj->cinst = gpuobj->pinst;
-
- /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
- * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
- * on those cards? */
nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
(1 << 12) /* PT present */ |
(0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
+ (0 << 14) /* RW */ |
+ (2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(gpuobj, i * 4,
- dev_priv->gart_info.sg_dummy_bus | 3);
- }
- } else {
- for (i = 0; i < obj_size; i += 8) {
- nv_wo32(gpuobj, i + 0, 0x00000000);
- nv_wo32(gpuobj, i + 4, 0x00000000);
- }
+ for (i = 2; i < 2 + (aper_size >> 12); i++)
+ nv_wo32(gpuobj, i * 4, 0x00000000);
+
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ } else
+ if (dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+ 12, NV_MEM_ACCESS_RW,
+ &dev_priv->gart_info.vma);
+ if (ret)
+ return ret;
+
+ dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+ dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
}
- dev_priv->engine.instmem.flush(dev);
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
- dev_priv->gart_info.aper_base = 0;
- dev_priv->gart_info.aper_size = aper_size;
- dev_priv->gart_info.sg_ctxdma = gpuobj;
return 0;
}
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->gart_info.sg_dummy_page) {
- pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
- NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- unlock_page(dev_priv->gart_info.sg_dummy_page);
- __free_page(dev_priv->gart_info.sg_dummy_page);
- dev_priv->gart_info.sg_dummy_page = NULL;
- dev_priv->gart_info.sg_dummy_bus = 0;
- }
-
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_vm_put(&dev_priv->gart_info.vma);
}
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- int pte;
+ int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
- if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
- return 0;
- }
+ BUG_ON(dev_priv->card_type >= NV_50);
- NV_ERROR(dev, "Unimplemented on NV50\n");
- return -EINVAL;
+ return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+ (offset & NV_CTXDMA_PAGE_MASK);
}
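The new nouveau_sgdma_get_physical() above resolves a GART offset through the NV04-style ctxdma table: two 32-bit header words, then one PTE per 4 KiB page, with the page's bus address in the PTE's upper bits. A minimal sketch of the same arithmetic, assuming the 4 KiB NV_CTXDMA page constants used in this file and a CPU-visible copy of the table (the driver itself reads the object through nv_ro32()):

	#define NV_CTXDMA_PAGE_SHIFT	12			/* as in this file */
	#define NV_CTXDMA_PAGE_MASK	((1 << NV_CTXDMA_PAGE_SHIFT) - 1)

	/* Sketch only: mirrors the PTE math in nouveau_sgdma_get_physical(). */
	static u32 ctxdma_lookup(const u32 *table, u32 offset)
	{
		u32 pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;	/* skip 8-byte header */
		return (table[pte] & ~NV_CTXDMA_PAGE_MASK) |	/* page base from PTE */
		       (offset & NV_CTXDMA_PAGE_MASK);		/* byte within page */
	}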
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e..a54fc431fe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->graph.grclass = nv04_graph_grclass;
engine->graph.init = nv04_graph_init;
engine->graph.takedown = nv04_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.unload_context = nv04_graph_unload_context;
engine->fifo.channels = 16;
engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv10_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
- engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv10_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv20_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv30_fb_init;
engine->fb.takedown = nv30_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv30_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_set = nv04_pm_clock_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
case 0x60:
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
- engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
- engine->graph.grclass = nv40_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv40_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
- engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv40_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
engine->instmem.resume = nv50_instmem_resume;
- engine->instmem.populate = nv50_instmem_populate;
- engine->instmem.clear = nv50_instmem_clear;
- engine->instmem.bind = nv50_instmem_bind;
- engine->instmem.unbind = nv50_instmem_unbind;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
if (dev_priv->chipset == 0x50)
engine->instmem.flush = nv50_instmem_flush;
else
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
engine->graph.fifo_access = nv50_graph_fifo_access;
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.takedown = nv50_gpio_fini;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaf:
- engine->pm.clock_get = nva3_pm_clock_get;
- engine->pm.clock_pre = nva3_pm_clock_pre;
- engine->pm.clock_set = nva3_pm_clock_set;
- break;
- default:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ case 0x50:
engine->pm.clock_get = nv50_pm_clock_get;
engine->pm.clock_pre = nv50_pm_clock_pre;
engine->pm.clock_set = nv50_pm_clock_set;
break;
+ default:
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
+ break;
}
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ engine->crypt.init = nv84_crypt_init;
+ engine->crypt.takedown = nv84_crypt_fini;
+ engine->crypt.create_context = nv84_crypt_create_context;
+ engine->crypt.destroy_context = nv84_crypt_destroy_context;
+ engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
+ break;
+ default:
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ break;
+ }
+ engine->vram.init = nv50_vram_init;
+ engine->vram.get = nv50_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
- engine->instmem.populate = nvc0_instmem_populate;
- engine->instmem.clear = nvc0_instmem_clear;
- engine->instmem.bind = nvc0_instmem_bind;
- engine->instmem.unbind = nvc0_instmem_unbind;
- engine->instmem.flush = nvc0_instmem_flush;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
+ engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->graph.grclass = NULL; //nvc0_graph_grclass;
engine->graph.init = nvc0_graph_init;
engine->graph.takedown = nvc0_graph_takedown;
engine->graph.fifo_access = nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.takedown = nouveau_stub_takedown;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nvc0_vram_init;
+ engine->vram.get = nvc0_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nvc0_vram_flags_valid;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
+ /* no dma objects on fermi... */
+ if (dev_priv->card_type >= NV_C0)
+ goto out_done;
+
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
- NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
&gpuobj);
if (ret)
goto out_err;
@@ -505,9 +568,10 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
- ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+ &gpuobj);
if (ret)
goto out_err;
@@ -516,11 +580,12 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
+out_done:
+ mutex_unlock(&dev_priv->channel->mutex);
return 0;
out_err:
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put(&dev_priv->channel);
return ret;
}
@@ -531,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pci_resume(pdev);
drm_kms_helper_poll_enable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
+static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ nouveau_fbcon_output_poll_changed(dev);
+}
+
static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@ nouveau_card_init(struct drm_device *dev)
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_reprobe,
nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
+ spin_lock_init(&dev_priv->channels.lock);
+ spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fb;
+ /* PCRYPT */
+ ret = engine->crypt.init(dev);
+ if (ret)
+ goto out_graph;
+
/* PFIFO */
ret = engine->fifo.init(dev);
if (ret)
- goto out_graph;
+ goto out_crypt;
}
ret = engine->display.create(dev);
if (ret)
goto out_fifo;
- /* this call irq_preinstall, register irq handler and
- * call irq_postinstall
- */
- ret = drm_irq_install(dev);
+ ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
if (ret)
- goto out_display;
+ goto out_vblank;
- ret = drm_vblank_init(dev, 0);
+ ret = nouveau_irq_init(dev);
if (ret)
- goto out_irq;
+ goto out_vblank;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
@@ -669,12 +749,16 @@ nouveau_card_init(struct drm_device *dev)
out_fence:
nouveau_fence_fini(dev);
out_irq:
- drm_irq_uninstall(dev);
-out_display:
+ nouveau_irq_fini(dev);
+out_vblank:
+ drm_vblank_cleanup(dev);
engine->display.destroy(dev);
out_fifo:
if (!nouveau_noaccel)
engine->fifo.takedown(dev);
+out_crypt:
+ if (!nouveau_noaccel)
+ engine->crypt.takedown(dev);
out_graph:
if (!nouveau_noaccel)
engine->graph.takedown(dev);
@@ -713,12 +797,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
if (!engine->graph.accel_blocked) {
nouveau_fence_fini(dev);
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put_unlocked(&dev_priv->channel);
}
if (!nouveau_noaccel) {
engine->fifo.takedown(dev);
+ engine->crypt.takedown(dev);
engine->graph.takedown(dev);
}
engine->fb.takedown(dev);
@@ -737,7 +821,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_gpuobj_takedown(dev);
nouveau_mem_vram_fini(dev);
- drm_irq_uninstall(dev);
+ nouveau_irq_fini(dev);
+ drm_vblank_cleanup(dev);
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@ err_out:
void nouveau_lastclose(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
else
getparam->value = NV_PCI;
break;
- case NOUVEAU_GETPARAM_FB_PHYSICAL:
- getparam->value = dev_priv->fb_phys;
- break;
- case NOUVEAU_GETPARAM_AGP_PHYSICAL:
- getparam->value = dev_priv->gart_info.aper_base;
- break;
- case NOUVEAU_GETPARAM_PCI_PHYSICAL:
- if (dev->sg) {
- getparam->value = (unsigned long)dev->sg->virtual;
- } else {
- NV_ERROR(dev, "Requested PCIGART address, "
- "while no PCIGART was created\n");
- return -EINVAL;
- }
- break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = dev_priv->fb_available_size;
break;
@@ -1046,7 +1117,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev_priv->gart_info.aper_size;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
- getparam->value = dev_priv->vm_vram_base;
+ getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
break;
+ case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+ getparam->value = (dev_priv->card_type < NV_50);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
}
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
return false;
}
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if ((nv_rd32(dev, reg) & mask) != val)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
+
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t mask = ~0;
+
+ if (dev_priv->card_type == NV_40)
+ mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
nv_rd32(dev, NV04_PGRAPH_STATUS));
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 00000000000..fbe0fb13bc1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+ while (bf->name) {
+ if (value & bf->mask) {
+ printk(" %s", bf->name);
+ value &= ~bf->mask;
+ }
+
+ bf++;
+ }
+
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ while (en->name) {
+ if (value == en->value) {
+ printk("%s", en->name);
+ return;
+ }
+
+ en++;
+ }
+
+ printk("(unknown enum 0x%08x)", value);
+}
+
+int
+nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
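Both printers above walk a caller-supplied table until they hit the sentinel entry whose name is NULL, printing matched bits (or the matching enum name) and flagging anything left over as unknown. A short sketch with a made-up interrupt table; the bit names and status value are hypothetical, real tables live next to each engine's IRQ handler:

	#include "nouveau_util.h"

	static const struct nouveau_bitfield sketch_intr[] = {
		{ 0x00000001, "VBLANK" },
		{ 0x00000010, "NOTIFY" },
		{}				/* NULL name terminates the walk */
	};

	static void sketch_report(u32 stat)
	{
		printk(KERN_INFO "intr:");
		nouveau_bitfield_print(sketch_intr, stat);
		printk("\n");
	}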
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 00000000000..d9ceaea26f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+ u32 mask;
+ const char *name;
+};
+
+struct nouveau_enum {
+ u32 value;
+ const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 00000000000..97d82aedf86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mm_node *r;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ list_for_each_entry(r, &vram->regions, rl_entry) {
+ u64 phys = (u64)r->offset << 12;
+ u32 num = r->length >> bits;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map(vma, pgt, vram, pte, len, phys);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+ nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+ dma_addr_t *list)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map_sg(vma, pgt, pte, list, len);
+
+ num -= len;
+ pte += len;
+ list += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->unmap(pgt, pte, len);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+ nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_vm_pgt *vpgt;
+ struct nouveau_gpuobj *pgt;
+ u32 pde;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ vpgt = &vm->pgt[pde - vm->fpde];
+ if (--vpgt->refcount[big])
+ continue;
+
+ pgt = vpgt->obj[big];
+ vpgt->obj[big] = NULL;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ }
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_gpuobj *pgt;
+ int big = (type != vm->spg_shift);
+ u32 pgt_size;
+ int ret;
+
+ pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+ pgt_size *= 8;
+
+ mutex_unlock(&vm->mm->mutex);
+ ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ if (unlikely(ret))
+ return ret;
+
+ /* someone beat us to filling the PDE while we didn't have the lock */
+ if (unlikely(vpgt->refcount[big]++)) {
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ return 0;
+ }
+
+ vpgt->obj[big] = pgt;
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *vma)
+{
+ u32 align = (1 << page_shift) >> 12;
+ u32 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&vm->mm->mutex);
+ ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&vm->mm->mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != vm->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nouveau_mm_put(vm->mm, vma->node);
+ mutex_unlock(&vm->mm->mutex);
+ vma->node = NULL;
+ return ret;
+ }
+ }
+ mutex_unlock(&vm->mm->mutex);
+
+ vma->vm = vm;
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+ struct nouveau_vm *vm = vma->vm;
+ u32 fpde, lpde;
+
+ if (unlikely(vma->node == NULL))
+ return;
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+ mutex_lock(&vm->mm->mutex);
+ nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+ nouveau_mm_put(vm->mm, vma->node);
+ vma->node = NULL;
+ mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **pvm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vm *vm;
+ u64 mm_length = (offset + length) - mm_offset;
+ u32 block, pgt_bits;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ if (dev_priv->card_type == NV_50) {
+ vm->map_pgt = nv50_vm_map_pgt;
+ vm->map = nv50_vm_map;
+ vm->map_sg = nv50_vm_map_sg;
+ vm->unmap = nv50_vm_unmap;
+ vm->flush = nv50_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 16;
+
+ pgt_bits = 29;
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+
+ } else
+ if (dev_priv->card_type == NV_C0) {
+ vm->map_pgt = nvc0_vm_map_pgt;
+ vm->map = nvc0_vm_map;
+ vm->map_sg = nvc0_vm_map_sg;
+ vm->unmap = nvc0_vm_unmap;
+ vm->flush = nvc0_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 17;
+ pgt_bits = 27;
+
+ /* Should be 4096 everywhere, this is a hack that's
+ * currently necessary to avoid an elusive bug that
+ * causes corruption when mixing small/large pages
+ */
+ if (length < (1ULL << 40))
+ block = 4096;
+ else {
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+ }
+ } else {
+ kfree(vm);
+ return -ENOSYS;
+ }
+
+ vm->fpde = offset >> pgt_bits;
+ vm->lpde = (offset + length - 1) >> pgt_bits;
+ vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+ if (!vm->pgt) {
+ kfree(vm);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vm->pgd_list);
+ vm->dev = dev;
+ vm->refcount = 1;
+ vm->pgt_bits = pgt_bits - 12;
+
+ ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+ block >> 12);
+ if (ret) {
+ kfree(vm);
+ return ret;
+ }
+
+ *pvm = vm;
+ return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd;
+ int i;
+
+ if (!pgd)
+ return 0;
+
+ vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+ if (!vpgd)
+ return -ENOMEM;
+
+ nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+ mutex_lock(&vm->mm->mutex);
+ for (i = vm->fpde; i <= vm->lpde; i++)
+ vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ list_add(&vpgd->head, &vm->pgd_list);
+ mutex_unlock(&vm->mm->mutex);
+ return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ if (!pgd)
+ return;
+
+ mutex_lock(&vm->mm->mutex);
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ if (vpgd->obj != pgd)
+ continue;
+
+ list_del(&vpgd->head);
+ nouveau_gpuobj_ref(NULL, &vpgd->obj);
+ kfree(vpgd);
+ }
+ mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ nouveau_vm_unlink(vm, vpgd->obj);
+ }
+ WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+ kfree(vm->pgt);
+ kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+ struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm *vm;
+ int ret;
+
+ vm = ref;
+ if (vm) {
+ ret = nouveau_vm_link(vm, pgd);
+ if (ret)
+ return ret;
+
+ vm->refcount++;
+ }
+
+ vm = *ptr;
+ *ptr = ref;
+
+ if (vm) {
+ nouveau_vm_unlink(vm, pgd);
+
+ if (--vm->refcount == 0)
+ nouveau_vm_del(vm);
+ }
+
+ return 0;
+}
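The exported VM interface composes exactly as nv50_sgdma_bind()/nv50_sgdma_unbind() earlier in this diff use it: nouveau_vm_get() reserves virtual address space and its page tables, nouveau_vm_map_sg() fills the PTEs from a dma_addr_t list, and nouveau_vm_unmap() plus nouveau_vm_put() reverse the two steps. A hedged sketch of that lifecycle; 'vm', 'npages' and 'pages' stand in for caller state:

	static int sketch_map_range(struct nouveau_vm *vm, u32 npages,
				    dma_addr_t *pages, struct nouveau_vma *vma)
	{
		int ret;

		/* Reserve GPU virtual space backed by 4KiB (shift 12) pages. */
		ret = nouveau_vm_get(vm, (u64)npages << 12, 12,
				     NV_MEM_ACCESS_RW, vma);
		if (ret)
			return ret;

		/* Point the freshly allocated PTEs at the caller's DMA pages;
		 * vma->offset is now the GPU virtual base of the range. */
		nouveau_vm_map_sg(vma, 0, (u64)npages << 12, pages);
		return 0;
	}

	/* Teardown mirrors nv50_sgdma_unbind():
	 *	nouveau_vm_unmap(vma);
	 *	nouveau_vm_put(vma);
	 */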
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 00000000000..e1193515771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+ struct nouveau_gpuobj *obj[2];
+ u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+ struct list_head head;
+ struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+ struct nouveau_vm *vm;
+ struct nouveau_mm_node *node;
+ u64 offset;
+ u32 access;
+};
+
+struct nouveau_vm {
+ struct drm_device *dev;
+ struct nouveau_mm *mm;
+ int refcount;
+
+ struct list_head pgd_list;
+ atomic_t pgraph_refs;
+ atomic_t pcrypt_refs;
+
+ struct nouveau_vm_pgt *pgt;
+ u32 fpde;
+ u32 lpde;
+
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+ void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+ void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **);
+int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+ struct nouveau_gpuobj *pgd);
+int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+ dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e18074162..297505eb98d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
if (dev_priv->card_type >= NV_30)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ if (dev_priv->card_type >= NV_10)
+ regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ else
+ regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f2ffc..e000455e06d 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000001))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf772e3..1715e1464b7 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
func->save(encoder);
}
+ nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+ nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
return 0;
}
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
+ nouveau_irq_unregister(dev, 24);
+ nouveau_irq_unregister(dev, 25);
+
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c9388bc..7a118937109 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
-void
+int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, 7);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t dsize;
uint32_t width;
uint32_t *data = (uint32_t *)image->data;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- nouveau_fbcon_gpu_lockup(info);
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 8);
dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
- if (RING_SPACE(chan, iter_len + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, iter_len + 1);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ return 0;
}
int
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
- 0x0062 : 0x0042, NvCtxSurf2D);
+ ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+ dev_priv->card_type >= NV_10 ?
+ 0x0062 : 0x0042);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+ ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+ ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
- 0x009f : 0x005f, NvImageBlit);
+ ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+ dev_priv->chipset >= 0x11 ?
+ 0x009f : 0x005f);
if (ret)
return ret;
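With the hunks above, the nv04 fbcon hooks return an error rather than degrading silently; the FBINFO state checks and cfb_*() software fallbacks they used to carry move into the shared nouveau_fbcon layer. A sketch of that caller-side shape, with the wrapper name and placement assumed from nouveau_fbcon.c rather than shown in this diff:

	/* Sketch: generic wrapper falls back to software on any failure. */
	static void
	nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
	{
		int ret = -ENODEV;

		if (info->state == FBINFO_STATE_RUNNING &&
		    !(info->flags & FBINFO_HWACCEL_DISABLED))
			ret = nv04_fbcon_copyarea(info, region);

		if (ret)
			cfb_copyarea(info, region);	/* software path */
	}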
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddc..f89d104698d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_util.h"
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
@@ -151,10 +157,31 @@ void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ unsigned long flags;
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
+ /* Keep it from being rescheduled */
+ nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
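nv_mask() above is nouveau's read-modify-write register helper; assuming those semantics, the PFIFO_MODE call expands to roughly:

	u32 mode = nv_rd32(dev, NV04_PFIFO_MODE);
	mode &= ~(1 << chan->id);	/* drop the channel from DMA mode */
	nv_wr32(dev, NV04_PFIFO_MODE, mode);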
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
return 0;
}
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_gpuobj *obj;
+ unsigned long flags;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ u32 engine;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ chan = dev_priv->channels.ptr[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000: /* bind object to subchannel */
+ obj = nouveau_ramht_find(chan, data);
+ if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+ break;
+
+ chan->sw_subchannel[subc] = obj->class;
+ engine = 0x0000000f << (subc * 4);
+
+ nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+ handled = true;
+ break;
+ default:
+ engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ mthd, data))
+ handled = true;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return handled;
+}
+
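For reference, the field extraction at the top of nouveau_fifo_swmthd() applied to a made-up trapped address word:

	u32 addr = 0x00006150;		/* hypothetical trapped address */
	int subc = (addr >> 13) & 0x7;	/* == 3: subchannel 3 */
	int mthd = addr & 0x1ffc;	/* == 0x0150: e.g. the set_ref method */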
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+ * wrapping on my G80 chips, but CACHE1 isn't big
+ * enough for this much data.  Tests show that it
+ * wraps around to the start at GET=0x800; the
+ * reason is unknown.
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x Push 0x%08x\n",
+ chid, dma_get, dma_put, state, push);
+
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
+ }
+
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
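A worked instance of the CACHE1 pointer math in the cache-error path above, using the GET=0x800 wrap noted in the comment (sample GET value assumed):

	u32 get = 0x804;		/* CACHE1 GET just past the wrap point */
	int ptr = (get & 0x7ff) >> 2;	/* == 1: second METHOD/DATA slot */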
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b63..af75015068d 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
static uint32_t nv04_graph_ctx_regs[] = {
0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
-void
+static void
nv04_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = dev_priv->engine.fifo.channel_id(dev);
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
void nv04_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ int ret;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv04_graph_register(dev);
+ if (ret)
+ return ret;
+
/* Enable PGRAPH interrupts */
+ nouveau_irq_register(dev, 12, nv04_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
void nv04_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
}
static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
atomic_set(&chan->fence.last_sequence_irq, data);
return 0;
}
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state s;
+
+ if (!nouveau_finish_page_flip(chan, &s))
+ nv_set_crtc_base(dev, s.crtc,
+ s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+ return 0;
+}
+
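The base computation in nv04_graph_mthd_page_flip() is plain byte arithmetic; with assumed values (32bpp, 5120-byte pitch, flip target x=8, y=2, offset 0):

	u32 base = 0 + 2 * 5120 + 8 * 32 / 8;	/* == 10272 bytes */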
/*
* Software methods, why they are needed, and how they all work:
*
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
*/
static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
- uint32_t tmp;
+ u32 tmp;
tmp = nv_ri32(dev, instance);
tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
}
static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- uint32_t tmp, ctx1;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 tmp, ctx1;
int class, op, valid = 1;
ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
}
static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (data > 5)
return 1;
/* Old versions of the objects only accept the first three operations. */
- if (data > 2 && grclass < 0x40)
+ if (data > 2 && class < 0x40)
return 1;
nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
return 1;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
- { 0x0150, nv04_graph_mthd_set_ref },
- {}
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
- { 0x0184, nv04_graph_mthd_bind_nv01_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv04_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
- { 0x0188, nv04_graph_mthd_bind_chroma },
- { 0x018c, nv04_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_nv04_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
+ if (dev_priv->engine.graph.registered)
+ return 0;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
+ /* dvd subpicture */
+ NVOBJ_CLASS(dev, 0x0038, GR);
+
+ /* m2mf */
+ NVOBJ_CLASS(dev, 0x0039, GR);
+
+ /* nv03 gdirect */
+ NVOBJ_CLASS(dev, 0x004b, GR);
+ NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 gdirect */
+ NVOBJ_CLASS(dev, 0x004a, GR);
+ NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 imageblit */
+ NVOBJ_CLASS(dev, 0x001f, GR);
+ NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+ NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 imageblit */
+ NVOBJ_CLASS(dev, 0x005f, GR);
+ NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 iifc */
+ NVOBJ_CLASS(dev, 0x0060, GR);
+ NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+ /* nv05 iifc */
+ NVOBJ_CLASS(dev, 0x0064, GR);
+
+ /* nv01 ifc */
+ NVOBJ_CLASS(dev, 0x0021, GR);
+ NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 ifc */
+ NVOBJ_CLASS(dev, 0x0061, GR);
+ NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 ifc */
+ NVOBJ_CLASS(dev, 0x0065, GR);
+
+ /* nv03 sifc */
+ NVOBJ_CLASS(dev, 0x0036, GR);
+ NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifc */
+ NVOBJ_CLASS(dev, 0x0076, GR);
+ NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 sifc */
+ NVOBJ_CLASS(dev, 0x0066, GR);
+
+ /* nv03 sifm */
+ NVOBJ_CLASS(dev, 0x0037, GR);
+ NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifm */
+ NVOBJ_CLASS(dev, 0x0077, GR);
+ NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* null */
+ NVOBJ_CLASS(dev, 0x0030, GR);
+
+ /* surf2d */
+ NVOBJ_CLASS(dev, 0x0042, GR);
+
+ /* rop */
+ NVOBJ_CLASS(dev, 0x0043, GR);
+
+ /* beta1 */
+ NVOBJ_CLASS(dev, 0x0012, GR);
+
+ /* beta4 */
+ NVOBJ_CLASS(dev, 0x0072, GR);
+
+ /* cliprect */
+ NVOBJ_CLASS(dev, 0x0019, GR);
+
+ /* nv01 pattern */
+ NVOBJ_CLASS(dev, 0x0018, GR);
+
+ /* nv04 pattern */
+ NVOBJ_CLASS(dev, 0x0044, GR);
+
+ /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0052, GR);
+
+ /* surf3d */
+ NVOBJ_CLASS(dev, 0x0053, GR);
+ NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+ NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+ /* nv03 tex_tri */
+ NVOBJ_CLASS(dev, 0x0048, GR);
+ NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+ NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+ /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0054, GR);
+
+ /* multitex_tri */
+ NVOBJ_CLASS(dev, 0x0055, GR);
+
+ /* nv01 chroma */
+ NVOBJ_CLASS(dev, 0x0017, GR);
+
+ /* nv04 chroma */
+ NVOBJ_CLASS(dev, 0x0057, GR);
+
+ /* surf_dst */
+ NVOBJ_CLASS(dev, 0x0058, GR);
+
+ /* surf_src */
+ NVOBJ_CLASS(dev, 0x0059, GR);
+
+ /* surf_color */
+ NVOBJ_CLASS(dev, 0x005a, GR);
+
+ /* surf_zeta */
+ NVOBJ_CLASS(dev, 0x005b, GR);
+
+ /* nv01 line */
+ NVOBJ_CLASS(dev, 0x001c, GR);
+ NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 line */
+ NVOBJ_CLASS(dev, 0x005c, GR);
+ NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 tri */
+ NVOBJ_CLASS(dev, 0x001d, GR);
+ NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 tri */
+ NVOBJ_CLASS(dev, 0x005d, GR);
+ NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 rect */
+ NVOBJ_CLASS(dev, 0x001e, GR);
+ NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 rect */
+ NVOBJ_CLASS(dev, 0x005e, GR);
+ NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
};
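The NVOBJ_CLASS()/NVOBJ_MTHD() calls above replace the static method tables deleted in this hunk. A presumed shape for these macros (an assumption, not shown in this patch) explains why the register function itself only ever ends in "return 0": each macro returns from the enclosing function on failure.

	#define NVOBJ_CLASS(d, c, e) do {                                       \
		int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
		if (ret)                                                        \
			return ret;                                             \
	} while (0)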
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
- {},
+static struct nouveau_bitfield nv04_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ {}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
- {},
+static struct nouveau_bitfield nv04_graph_nstatus[] = {
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+ {}
};
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0038, false, NULL }, /* dvd subpicture */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
- { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
- { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
- { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
- { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
- { 0x0064, false, NULL }, /* nv05 iifc */
- { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
- { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
- { 0x0065, false, NULL }, /* nv05 ifc */
- { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
- { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
- { 0x0066, false, NULL }, /* nv05 sifc */
- { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
- { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
- { 0x0030, false, NULL }, /* null */
- { 0x0042, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0018, false, NULL }, /* nv01 pattern */
- { 0x0044, false, NULL }, /* nv04 pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
- { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
- { 0x0054, false, NULL }, /* tex_tri */
- { 0x0055, false, NULL }, /* multitex_tri */
- { 0x0017, false, NULL }, /* nv01 chroma */
- { 0x0057, false, NULL }, /* nv04 chroma */
- { 0x0058, false, NULL }, /* surf_dst */
- { 0x0059, false, NULL }, /* surf_src */
- { 0x005a, false, NULL }, /* surf_color */
- { 0x005b, false, NULL }, /* surf_zeta */
- { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
- { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
- { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
- { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
- { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
- { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
- { 0x506e, true, nv04_graph_mthds_sw },
+struct nouveau_bitfield nv04_graph_nsource[] = {
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
{}
};
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x0f000000) >> 24;
+ u32 subc = (addr & 0x0000e000) >> 13;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_NOTIFY) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_NOTIFY;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv04_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv04_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
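TRAPPED_ADDR field decode for the nv04 ISR above, applied to a made-up trap word (note the nv10 ISR further down uses wider fields: chid in bits 20-24, subc in bits 16-18):

	u32 addr = 0x03004188;			/* hypothetical trap word */
	u32 chid = (addr & 0x0f000000) >> 24;	/* == 3  (channel) */
	u32 subc = (addr & 0x0000e000) >> 13;	/* == 2  (subchannel) */
	u32 mthd = addr & 0x00001ffc;		/* == 0x0188 (method) */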
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae297abd..b8e3edb5c06 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
}
int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv04_instmem_suspend(struct drm_device *dev)
{
return 0;
}
void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
{
- return 0;
}
int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_mm_node *ramin = NULL;
+
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap))
+ return -ENOMEM;
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ return -ENOMEM;
+ }
+
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ gpuobj->node = ramin;
+ gpuobj->vinst = ramin->start;
return 0;
}
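The loop in nv04_instmem_get() is the standard drm_mm pattern for allocating under a spinlock: drm_mm_pre_get() reserves backing memory while unlocked so drm_mm_get_block_atomic() cannot sleep. Restated in isolation with the two failure cases annotated (a sketch of the same logic, names abbreviated):

	static int alloc_ramin(struct drm_mm *heap, spinlock_t *lock,
			       u32 size, u32 align, struct drm_mm_node **pnode)
	{
		struct drm_mm_node *node;

		do {
			if (drm_mm_pre_get(heap))	/* may sleep; lock not held */
				return -ENOMEM;

			spin_lock(lock);
			node = drm_mm_search_free(heap, size, align, 0);
			if (!node) {			/* heap genuinely full */
				spin_unlock(lock);
				return -ENOMEM;
			}
			node = drm_mm_get_block_atomic(node, size, align);
			spin_unlock(lock);
		} while (!node);			/* lost the race: retry */

		*pnode = node;
		return 0;
	}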
void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ gpuobj->node = NULL;
+ spin_unlock(&dev_priv->ramin_lock);
}
int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
+ gpuobj->pinst = gpuobj->vinst;
return 0;
}
void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e50..f78181a59b4 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+ tile->addr = addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+
+ if (dev_priv->card_type == NV_20) {
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ /*
+ * Allocate some of the on-die tag memory,
+ * used to store Z compression meta-data (most
+ * likely just a bitmap determining if a given
+ * tile is compressed or not).
+ */
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ if (dev_priv->chipset >= 0x25)
+ tile->zcomp = tile->tag_mem->start |
+ (bpp == 16 ?
+ NV25_PFB_ZCOMP_MODE_16 :
+ NV25_PFB_ZCOMP_MODE_32);
+ else
+ tile->zcomp = tile->tag_mem->start |
+ NV20_PFB_ZCOMP_EN |
+ (bpp == 16 ? 0 :
+ NV20_PFB_ZCOMP_MODE_32);
+ }
+
+ tile->addr |= 3;
+ } else {
+ tile->addr |= 1;
+ }
+
+ } else {
+ tile->addr |= 1 << 31;
+ }
+}
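Tag sizing above is one byte of on-die tag memory per 256 bytes of zeta surface (size / 256); with an assumed 1 MiB tile region:

	u32 size = 1 << 20;	/* hypothetical 1 MiB zeta region */
	u32 tags = size / 256;	/* == 4096 bytes of tag RAM requested */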
+
void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (pitch) {
- if (dev_priv->card_type >= NV_20)
- addr |= 1;
- else
- addr |= 1 << 31;
+ if (tile->tag_mem) {
+ nv20_fb_free_tag(dev, tile->tag_mem);
+ tile->tag_mem = NULL;
}
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+ if (dev_priv->card_type == NV_20)
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ if (dev_priv->card_type == NV_20)
+ drm_mm_init(&pfb->tag_heap, 0,
+ (dev_priv->chipset >= 0x25 ?
+ 64 * 1024 : 32 * 1024));
+
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
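Combined with the 1-tag-byte-per-256-bytes rule above, the heap sizes chosen here bound the amount of compressible zeta memory (derived from the code, not stated in the patch):

	u32 heap  = 64 * 1024;	/* chipset >= 0x25 (else 32 KiB) */
	u32 cover = heap * 256;	/* == 16 MiB of zeta coverage */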
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
void
nv10_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+
+ if (dev_priv->card_type == NV_20)
+ drm_mm_takedown(&pfb->tag_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd..d2ecbff4bee 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
/* Fill in the entries that are seen filled in dumps of the nvidia driver
* just after the channel is put into DMA mode
*/
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c973115..8c92edb7bbc 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
#define NV10_FIFO_NUMBER 32
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv10_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan && chan->pgraph_ctx)
nv10_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
#define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1 << 31;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
}
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
- int i;
+ int ret, i;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv10_graph_register(dev);
+ if (ret)
+ return ret;
+
+ nouveau_irq_register(dev, 12, nv10_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv10_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
void nv10_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct graph_state *ctx = chan->pgraph_ctx;
struct pipe_state *pipe = &ctx->pipe_state;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
uint32_t xfmode0, xfmode1;
int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
nouveau_wait_for_idle(dev);
- pgraph->fifo_access(dev, true);
-
return 0;
}
static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
nouveau_wait_for_idle(dev);
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
nv_wr32(dev, 0x004006b0,
nv_rd32(dev, 0x004006b0) | 0x8 << 24);
- pgraph->fifo_access(dev, true);
+ return 0;
+}
+
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+ NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+ /* celsius */
+ if (dev_priv->chipset <= 0x10) {
+ NVOBJ_CLASS(dev, 0x0056, GR);
+ } else
+ if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+ NVOBJ_CLASS(dev, 0x0096, GR);
+ } else {
+ NVOBJ_CLASS(dev, 0x0099, GR);
+ NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+ }
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
return 0;
}
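The Celsius branch above selects one 3D class per chipset generation; folded into a helper for clarity (hypothetical, not part of the patch):

	static u32 celsius_class(int chipset)
	{
		if (chipset <= 0x10)
			return 0x0056;
		if (chipset < 0x17 || chipset == 0x1a)
			return 0x0096;
		return 0x0099;	/* nv17+: LMA window/enable methods attached */
	}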
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+struct nouveau_bitfield nv10_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ { NV_PGRAPH_INTR_ERROR, "ERROR" },
{}
};
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x005f, false, NULL }, /* imageblit */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0093, false, NULL }, /* surf3d */
- { 0x0094, false, NULL }, /* tex_tri */
- { 0x0095, false, NULL }, /* multitex_tri */
- { 0x0056, false, NULL }, /* celcius (nv10) */
- { 0x0096, false, NULL }, /* celcius (nv11) */
- { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] = {
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
{}
};
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv10_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56ec..8464b76798d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
- nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
}
void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+ if (dev_priv->card_type == NV_20) {
+ nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ }
}
int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
nv20_graph_rdi(dev);
+ ret = nv20_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x40009C , 0x00000040);
if (dev_priv->chipset >= 0x25) {
- nv_wr32(dev, 0x400890, 0x00080000);
+ nv_wr32(dev, 0x400890, 0x00a8cfff);
nv_wr32(dev, 0x400610, 0x304B1FB6);
- nv_wr32(dev, 0x400B80, 0x18B82880);
+ nv_wr32(dev, 0x400B80, 0x1cbd3883);
nv_wr32(dev, 0x400B84, 0x44000000);
nv_wr32(dev, 0x400098, 0x40000080);
nv_wr32(dev, 0x400B88, 0x000000ff);
+
} else {
- nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+ nv_wr32(dev, 0x400880, 0x0008c7df);
nv_wr32(dev, 0x400094, 0x00000005);
- nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+ nv_wr32(dev, 0x400B80, 0x45eae20e);
nv_wr32(dev, 0x400B84, 0x24000000);
nv_wr32(dev, 0x400098, 0x00000040);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, 0x100300 + i * 4));
- }
nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
+
nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
return ret;
}
+ ret = nv30_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
pgraph->ctx_table->pinst >> 4);
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
return 0;
}
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x009e, false, NULL }, /* swzsurf */
- { 0x0096, false, NULL }, /* celcius */
- { 0x0097, false, NULL }, /* kelvin (nv20) */
- { 0x0597, false, NULL }, /* kelvin (nv25) */
- {}
-};
-
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x038a, false, NULL }, /* ifc (nv30) */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0389, false, NULL }, /* sifm (nv30) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0362, false, NULL }, /* surf2d (nv30) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x039e, false, NULL }, /* swzsurf */
- { 0x0397, false, NULL }, /* rankine (nv30) */
- { 0x0497, false, NULL }, /* rankine (nv35) */
- { 0x0697, false, NULL }, /* rankine (nv34) */
- {}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
+
+ /* kelvin */
+ if (dev_priv->chipset < 0x25)
+ NVOBJ_CLASS(dev, 0x0097, GR);
+ else
+ NVOBJ_CLASS(dev, 0x0597, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+ /* rankine */
+ if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0397, GR);
+ else
+ if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0697, GR);
+ else
+ if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0497, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
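The rankine-class selection in nv30_graph_register() relies on a compact idiom: 1 << (chipset & 0x0f) turns the chipset's low nibble into a bit that is tested against a constant set. A stand-alone illustration (the helper is hypothetical and the chipset values are examples only):

#include <stdint.h>
#include <stdio.h>

/* Set-membership test as used in nv30_graph_register(): 0x00000003
 * covers low nibbles 0..1 (class 0x0397), 0x00000010 covers nibble 4
 * (class 0x0697), 0x000001e0 covers nibbles 5..8 (class 0x0497).
 */
static int chipset_in_set(uint32_t set, uint32_t chipset)
{
    return !!(set & (1 << (chipset & 0x0f)));
}

int main(void)
{
    printf("0x31 uses 0x0397: %d\n", chipset_in_set(0x00000003, 0x31));
    printf("0x34 uses 0x0697: %d\n", chipset_in_set(0x00000010, 0x34));
    printf("0x36 uses 0x0497: %d\n", chipset_in_set(0x000001e0, 0x36));
    return 0;
}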
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f09512..e0135f0e214 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr | 1;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = 0;
+}
+
static int
calc_bias(struct drm_device *dev, int k, int i, int j)
{
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
/* Init the memory timing regs at 0x10037c/0x1003ac */
if (dev_priv->chipset == 0x30 ||
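The new init_tile_region hook above records the region parameters instead of writing registers directly; the arithmetic itself is unchanged from the old set_region_tiling paths. A self-contained sketch of that computation (the struct mirrors nouveau_tile_reg's three fields, the values are illustrative):

#include <stdint.h>
#include <stdio.h>

struct tile_reg {
    uint32_t addr, limit, pitch;    /* mirrors nouveau_tile_reg */
};

/* addr | 1 sets the region's enable bit; max(1u, addr + size) - 1
 * yields an inclusive end address and avoids underflow when both
 * addr and size are zero.
 */
static void init_tile(struct tile_reg *t, uint32_t addr, uint32_t size,
                      uint32_t pitch)
{
    uint32_t end = addr + size;

    t->addr  = addr | 1;
    t->limit = (end > 1 ? end : 1) - 1;
    t->pitch = pitch;
}

int main(void)
{
    struct tile_reg t;

    init_tile(&t, 0x00100000, 0x00040000, 256);
    printf("addr 0x%08x limit 0x%08x pitch %u\n",
           t.addr, t.limit, t.pitch);
    return 0;
}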
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd..f3d9c0505f7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
#include "nouveau_drm.h"
void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
case 0x40:
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
break;
default:
- nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
break;
}
}
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cd..49b9a35a9cd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV40_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
@@ -59,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
/* enable the fifo dma operation */
@@ -70,17 +74,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -279,6 +272,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -301,7 +295,7 @@ nv40_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
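The loop above now consults dev_priv->channels.ptr[] rather than the old dev_priv->fifos[] array when re-enabling DMA mode per channel. The mask construction, condensed into a single accumulated value for illustration (channel occupancy is made up):

#include <stdint.h>
#include <stdio.h>

#define CHANNELS 32

int main(void)
{
    void *ptr[CHANNELS] = { (void *)1, 0, (void *)1 };  /* made up */
    uint32_t mode = 0;
    int i;

    /* one DMA-mode bit per allocated channel, as in nv40_fifo_init() */
    for (i = 0; i < CHANNELS; i++) {
        if (ptr[i])
            mode |= 1u << i;
    }

    printf("PFIFO_MODE = 0x%08x\n", mode);  /* 0x00000005 here */
    return 0;
}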
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b..19ef92a0375 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin_grctx &&
chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_grctx ctx = {};
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@ nv40_graph_create_context(struct nouveau_channel *chan)
nv40_grctx_init(&ctx);
nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
+
+ /* init grctx pointer in ramfc, and on PFIFO if channel is
+ * already active there
+ */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+ if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+ nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
@@ -174,43 +205,39 @@ nv40_graph_unload_context(struct drm_device *dev)
}
void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
case 0x44:
case 0x4a:
case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
default:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
}
}
@@ -232,7 +259,7 @@ nv40_graph_init(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_grctx ctx = {};
uint32_t vramsz, *cp;
- int i, j;
+ int ret, i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +283,14 @@ nv40_graph_init(struct drm_device *dev)
kfree(cp);
+ ret = nv40_graph_register(dev);
+ if (ret)
+ return ret;
+
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+ nouveau_irq_register(dev, 12, nv40_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -347,7 +379,7 @@ nv40_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv40_graph_set_tile_region(dev, i);
/* begin RAM config */
vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +422,111 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
+ nouveau_irq_unregister(dev, 12);
}
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x3089, false, NULL }, /* sifm (nv40) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x3062, false, NULL }, /* surf2d (nv40) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x309e, false, NULL }, /* swzsurf */
- { 0x4097, false, NULL }, /* curie (nv40) */
- { 0x4497, false, NULL }, /* curie (nv44) */
- {}
-};
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+ /* curie */
+ if (dev_priv->chipset >= 0x60 ||
+ 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x4497, GR);
+ else
+ NVOBJ_CLASS(dev, 0x4097, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (inst == chan->ramin_grctx->pinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+ u32 chid = nv40_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ } else
+ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ nv_mask(dev, 0x402000, 0, 0);
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+}
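nv40_graph_isr_chid() above recovers a software channel id from the trapping context's instance address by scanning the channel table under the channels lock. The lookup pattern, approximated in user space with a plain array standing in for dev_priv->channels.ptr:

#include <stdint.h>
#include <stdio.h>

#define NUM_CHANNELS 32             /* engine.fifo.channels on nv40 */

struct channel {                    /* stand-in for nouveau_channel */
    uint32_t grctx_pinst;           /* 0 = no graph context yet */
};

/* Linear scan as in nv40_graph_isr_chid(): returns the matching index,
 * or NUM_CHANNELS when no channel owns the instance.
 */
static int isr_chid(struct channel *chans[], uint32_t inst)
{
    int i;

    for (i = 0; i < NUM_CHANNELS; i++) {
        if (!chans[i] || !chans[i]->grctx_pinst)
            continue;
        if (chans[i]->grctx_pinst == inst)
            break;
    }
    return i;
}

int main(void)
{
    struct channel c2 = { .grctx_pinst = 0x1000 };
    struct channel *chans[NUM_CHANNELS] = { [2] = &c2 };

    printf("inst 0x1000 -> ch %d\n", isr_chid(chans, 0x1000));
    return 0;
}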
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0c6de..9023c4dbb44 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cursor = NULL;
struct drm_gem_object *gem;
@@ -374,8 +374,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -437,6 +436,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
.cursor_move = nv50_crtc_cursor_move,
.gamma_set = nv50_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv50_crtc_destroy,
};
@@ -453,6 +453,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -468,6 +469,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -545,7 +547,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+ nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (!nv_crtc->fb.tile_flags) {
OUT_RING(evo, drm_fb->pitch | (1 << 20));
} else {
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
- fb->nvbo->tile_mode);
+ u32 tile_mode = fb->nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ tile_mode >>= 4;
+ OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
}
if (dev_priv->chipset == 0x50)
OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
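The blank and mode-set paths above repeat the same tile_flags to FB-DMA-object mapping; condensed into one hypothetical helper for clarity (0xfe00 appears to be the Fermi-era 32-bpp memtype this patch adds alongside 0x7a00):

#include <stdio.h>

enum evo_fb { EVO_FB32, EVO_FB16, EVO_VRAM_LP };  /* stand-ins for NvEvoFB32 etc. */

/* 0x7a00 and 0xfe00 denote 32-bpp scanout memtypes and 0x7000 is
 * 16-bpp; everything else now goes through the new NvEvoVRAM_LP
 * object instead of NvEvoVRAM.
 */
static enum evo_fb fb_dma_for(unsigned int tile_flags)
{
    if (tile_flags == 0x7a00 || tile_flags == 0xfe00)
        return EVO_FB32;
    if (tile_flags == 0x7000)
        return EVO_FB16;
    return EVO_VRAM_LP;
}

int main(void)
{
    printf("0x7a00 -> %d, 0x7000 -> %d, 0 -> %d\n",
           fb_dma_for(0x7a00), fb_dma_for(0x7000), fb_dma_for(0));
    return 0;
}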
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c611dde..7cc94ed9ed9 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
+static void nv50_display_isr(struct drm_device *);
+
static inline int
nv50_sor_nr(struct drm_device *dev)
{
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan = *pchan;
-
- if (!chan)
- return;
- *pchan = NULL;
-
- nouveau_gpuobj_channel_takedown(chan);
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
- if (chan->user)
- iounmap(chan->user);
-
- kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
- uint32_t tile_flags, uint32_t magic_flags,
- uint32_t offset, uint32_t limit)
-{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
- if (ret)
- return ret;
- obj->engine = NVOBJ_ENGINE_DISPLAY;
-
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- if (dev_priv->card_type < NV_C0)
- nv_wo32(obj, 20, 0x00010000);
- else
- nv_wo32(obj, 20, 0x00020000);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
-
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramht = NULL;
- struct nouveau_channel *chan;
- int ret;
-
- chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- *pchan = chan;
-
- chan->id = -1;
- chan->dev = dev;
- chan->user_get = 4;
- chan->user_put = 0;
-
- ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret) {
- NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
- if (ret) {
- NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
- if (ret) {
- NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- if (dev_priv->chipset != 0x50) {
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
- }
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &chan->pushbuf_bo);
- if (ret == 0)
- ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret) {
- NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(0), PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "Error mapping EVO control regs.\n");
- nv50_evo_channel_del(pchan);
- return -ENOMEM;
- }
-
- return 0;
-}
-
int
nv50_display_early_init(struct drm_device *dev)
{
@@ -214,17 +63,16 @@ int
nv50_display_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_connector *connector;
- uint32_t val, ram_amount;
- uint64_t start;
+ struct nouveau_channel *evo;
int ret, i;
+ u32 val;
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
/*
* I think the 0x006101XX range is some kind of main control area
* that enables things.
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
val = nv_rd32(dev, 0x0061610c + (i * 0x800));
nv_wr32(dev, 0x0061019c + (i * 0x10), val);
}
+
/* DAC */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
+
/* SOR */
for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
+
/* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
}
- /* This used to be in crtc unblank, but seems out of place there. */
- nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
- /* RAM is clamped to 256 MiB. */
- ram_amount = dev_priv->vram_size;
- NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
- if (ram_amount > 256*1024*1024)
- ram_amount = 256*1024*1024;
- nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
- nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
- nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
/* The precise purpose is unknown, I suspect it has something to do
* with text mode.
*/
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
}
}
- /* taken from nv bug #12637, attempts to un-wedge the hw if it's
- * stuck in some unspecified state
- */
- start = ptimer->read(dev);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
- while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
- if ((val & 0x9f0000) == 0x20000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x800000);
-
- if ((val & 0x3f0000) == 0x30000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x200000);
-
- if (ptimer->read(dev) - start > 1000000000ULL) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- 0x40000000, 0x40000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- return -EBUSY;
- }
-
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
- /* initialise fifo */
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
- ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
- NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
- if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
- return -EBUSY;
+ if (conn->dcb->gpio_tag == 0xff)
+ continue;
+
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+
+ ret = nv50_evo_init(dev);
if (ret)
return ret;
+ evo = dev_priv->evo;
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
ret = RING_SPACE(evo, 11);
if (ret)
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
- /* enable clock change interrupts. */
- nv_wr32(dev, 0x610028, 0x00010001);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
return 0;
}
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- }
+ nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
/* disable interrupts. */
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
/* disable hotplug interrupts */
nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)
dev->mode_config.fb_base = dev_priv->fb_phys;
- /* Create EVO channel */
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
- return ret;
- }
-
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
}
}
+ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ nouveau_irq_register(dev, 26, nv50_display_isr);
+
ret = nv50_display_init(dev);
if (ret) {
nv50_display_destroy(dev);
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
- nv50_evo_channel_del(&dev_priv->evo);
+ nouveau_irq_unregister(dev, 26);
}
static u16
@@ -660,32 +434,32 @@ static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- struct list_head *entry, *tmp;
+ struct nouveau_channel *chan, *tmp;
- list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
- chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+ list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+ nvsw.vbl_wait) {
+ if (chan->nvsw.vblsem_head != crtc)
+ continue;
nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
chan->nvsw.vblsem_rval);
list_del(&chan->nvsw.vbl_wait);
+ drm_vblank_put(dev, crtc);
}
+
+ drm_handle_vblank(dev, crtc);
}
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
- intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
nv50_display_vblank_crtc_handler(dev, 0);
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
nv50_display_vblank_crtc_handler(dev, 1);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) & ~intr);
- nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
}
static void
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
static void
nv50_display_error_handler(struct drm_device *dev)
{
- uint32_t addr, data;
-
- nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
- addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
- data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
- NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
- 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
- nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, hpd_work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector;
- const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1;
-
- spin_lock_irq(&dev_priv->hpd_state.lock);
- hpd0 = dev_priv->hpd_state.hpd0_bits;
- dev_priv->hpd_state.hpd0_bits = 0;
- hpd1 = dev_priv->hpd_state.hpd1_bits;
- dev_priv->hpd_state.hpd1_bits = 0;
- spin_unlock_irq(&dev_priv->hpd_state.lock);
-
- hpd0 &= nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- hpd1 &= nv_rd32(dev, 0xe070);
-
- plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
- unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
- change_mask = plug_mask | unplug_mask;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder_helper_funcs *helper;
- struct nouveau_connector *nv_connector =
- nouveau_connector(connector);
- struct nouveau_encoder *nv_encoder;
- struct dcb_gpio_entry *gpio;
- uint32_t reg;
- bool plugged;
-
- if (!nv_connector->dcb)
- continue;
+ u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
- gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
- if (!gpio || !(change_mask & (1 << gpio->line)))
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
continue;
- reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
- plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
- NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector)) ;
-
- if (!connector->encoder || !connector->encoder->crtc ||
- !connector->encoder->crtc->enabled)
- continue;
- nv_encoder = nouveau_encoder(connector->encoder);
- helper = connector->encoder->helper_private;
-
- if (nv_encoder->dcb->type != OUTPUT_DP)
- continue;
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+ "(0x%04x 0x%02x)\n", chid,
+ addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
- if (plugged)
- helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
- else
- helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
}
-
- drm_helper_hpd_irq_event(dev);
}
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t delayed = 0;
- if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- uint32_t hpd0_bits, hpd1_bits = 0;
-
- hpd0_bits = nv_rd32(dev, 0xe054);
- nv_wr32(dev, 0xe054, hpd0_bits);
-
- if (dev_priv->chipset >= 0x90) {
- hpd1_bits = nv_rd32(dev, 0xe074);
- nv_wr32(dev, 0xe074, hpd1_bits);
- }
-
- spin_lock(&dev_priv->hpd_state.lock);
- dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
- dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
- spin_unlock(&dev_priv->hpd_state.lock);
-
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
- }
-
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
if (!intr0 && !(intr1 & ~delayed))
break;
- if (intr0 & 0x00010000) {
+ if (intr0 & 0x001f0000) {
nv50_display_error_handler(dev);
- intr0 &= ~0x00010000;
+ intr0 &= ~0x001f0000;
}
if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
}
}
}
-
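The reworked error handler above derives the set of faulting EVO channels from bits 16..20 of INTR_0 and acknowledges each one individually. The bit-walk, sketched with the register read replaced by a sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t intr0 = 0x00050000;    /* made up: channels 0 and 2 */
    uint32_t channels = (intr0 & 0x001f0000) >> 16;
    int chid;

    for (chid = 0; chid < 5; chid++) {
        if (!(channels & (1 << chid)))
            continue;
        /* the driver reads TRAPPED_ADDR/DATA(chid) here, logs the
         * method, then acks by writing 0x00010000 << chid to INTR_0 */
        printf("EvoCh %d faulted\n", chid);
    }
    return 0;
}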
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b85ee..f0e30b78ef6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 00000000000..14e24e906ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_channel *evo = *pevo;
+
+ if (!evo)
+ return;
+ *pevo = NULL;
+
+ dev_priv = evo->dev->dev_private;
+ dev_priv->evo_alloc &= ~(1 << evo->id);
+
+ nouveau_gpuobj_channel_takedown(evo);
+ nouveau_bo_unmap(evo->pushbuf_bo);
+ nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+ if (evo->user)
+ iounmap(evo->user);
+
+ kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+ u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+ u32 flags5)
+{
+ struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+ struct drm_device *dev = evo->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(obj, 4, limit);
+ nv_wo32(obj, 8, offset);
+ nv_wo32(obj, 12, 0x00000000);
+ nv_wo32(obj, 16, 0x00000000);
+ nv_wo32(obj, 20, flags5);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(evo, name, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo;
+ int ret;
+
+ evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+ if (!evo)
+ return -ENOMEM;
+ *pevo = evo;
+
+ for (evo->id = 0; evo->id < 5; evo->id++) {
+ if (dev_priv->evo_alloc & (1 << evo->id))
+ continue;
+
+ dev_priv->evo_alloc |= (1 << evo->id);
+ break;
+ }
+
+ if (evo->id == 5) {
+ kfree(evo);
+ return -ENODEV;
+ }
+
+ evo->dev = dev;
+ evo->user_get = 4;
+ evo->user_put = 0;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ false, true, &evo->pushbuf_bo);
+ if (ret == 0)
+ ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ ret = nouveau_bo_map(evo->pushbuf_bo);
+ if (ret) {
+ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+ if (!evo->user) {
+ NV_ERROR(dev, "Error mapping EVO control regs.\n");
+ nv50_evo_channel_del(pevo);
+ return -ENOMEM;
+ }
+
+ /* bind primary evo channel's ramht to the channel */
+ if (dev_priv->evo && evo != dev_priv->evo)
+ nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id, ret, i;
+ u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+ u32 tmp;
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x009f0000) == 0x00020000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x003f0000) == 0x00030000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+ /* initialise fifo */
+ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+ NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+ NV50_PDISPLAY_EVO_DMA_CB_VALID);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ return -EBUSY;
+ }
+
+ /* enable error reporting on the channel */
+ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+ evo->dma.max = (4096/4) - 2;
+ evo->dma.put = 0;
+ evo->dma.cur = evo->dma.put;
+ evo->dma.free = evo->dma.max - evo->dma.cur;
+
+ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(evo, 0);
+
+ return 0;
+}
+
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id;
+
+ nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ }
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
+ struct nouveau_channel *evo;
+ int ret;
+
+ /* create primary evo channel, the one we use for modesetting
+ * purposes
+ */
+ ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ if (ret)
+ return ret;
+ evo = dev_priv->evo;
+
+ /* setup object management on it, any other evo channel will
+ * use this also as there's no per-channel support on the
+ * hardware
+ */
+ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+ NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+ if (ret) {
+ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ /* create some default objects for the scanout memtypes we support */
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+ 0, 0xffffffff, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00020000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ } else
+ if (dev_priv->chipset != 0x50) {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->evo) {
+ ret = nv50_evo_create(dev);
+ if (ret)
+ return ret;
+ }
+
+ return nv50_evo_channel_init(dev_priv->evo);
+}
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->evo) {
+ nv50_evo_channel_fini(dev_priv->evo);
+ nv50_evo_channel_del(&dev_priv->evo);
+ }
+}
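nv50_evo_channel_new() now hands out channel ids from a five-slot bitmask (evo_alloc), failing with -ENODEV once every slot is taken. The allocator is a plain find-first-zero loop; a minimal sketch:

#include <stdio.h>

#define EVO_CHANNELS 5

/* Claim the lowest free id in *alloc; returns EVO_CHANNELS when every
 * slot is already in use, mirroring the evo->id == 5 check above.
 */
static int evo_id_get(unsigned int *alloc)
{
    int id;

    for (id = 0; id < EVO_CHANNELS; id++) {
        if (*alloc & (1u << id))
            continue;
        *alloc |= 1u << id;
        break;
    }
    return id;
}

int main(void)
{
    unsigned int alloc = 0;

    printf("first id: %d\n", evo_id_get(&alloc));   /* 0 */
    printf("second id: %d\n", evo_id_get(&alloc));  /* 1 */
    return 0;
}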
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae13343bce..aa4f0d3cea8 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
*
*/
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+ u32 tile_flags, u32 magic_flags,
+ u32 offset, u32 limit, u32 flags5);
+
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -111,3 +120,4 @@
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2..50290dea0ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+struct nv50_fb_priv {
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c08_page) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+ __free_page(priv->r100c08_page);
+ kfree(priv);
+ return -EFAULT;
+ }
+
+ dev_priv->engine.fb.priv = priv;
+ return 0;
+}
+
int
nv50_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nv50_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
/* Not a clue what this is exactly. Without pointing it at a
* scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
* cause IOMMU "read from address 0" errors (rh#561267)
*/
- nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+ nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
switch (dev_priv->chipset) {
case 0x50:
- nv_wr32(dev, 0x100c90, 0x0707ff);
+ nv_wr32(dev, 0x100c90, 0x000707ff);
break;
case 0xa3:
case 0xa5:
case 0xa8:
- nv_wr32(dev, 0x100c90, 0x0d0fff);
+ nv_wr32(dev, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(dev, 0x100c90, 0x089d1fff);
break;
default:
- nv_wr32(dev, 0x100c90, 0x1d07ff);
+ nv_wr32(dev, 0x100c90, 0x001d07ff);
break;
}
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = dev_priv->engine.fb.priv;
+ if (!priv)
+ return;
+ dev_priv->engine.fb.priv = NULL;
+
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ kfree(priv);
}
void
nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
u32 trap[6], idx, chinst;
int i, ch;
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
return;
chinst = (trap[2] << 16) | trap[1];
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
- struct nouveau_channel *chan = dev_priv->fifos[ch];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
continue;
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (chinst == chan->ramin->vinst >> 12)
break;
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
"channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048eddb..791ded1c5c6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
-void
+int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
- RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
if (rect->rop != ROP_COPY) {
BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 3);
}
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
@@ -80,9 +91,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
OUT_RING(chan, 0);
OUT_RING(chan, region->sy);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
- if (RING_SPACE(chan, push + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
dwords -= push;
@@ -148,6 +149,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
+ return 0;
}
int
@@ -157,12 +159,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_gpuobj *eng2d = NULL;
- uint64_t fb;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
- fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
switch (info->var.bits_per_pixel) {
case 8:
format = 0xf3;
@@ -190,12 +189,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
- nouveau_gpuobj_ref(NULL, &eng2d);
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
if (ret)
return ret;
@@ -253,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -262,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
return 0;
}
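
The fbcon hooks now return int instead of silently falling back, which
moves the software-fallback decision to a single caller. The caller-side
change is in nouveau_fbcon.c, outside this section; a sketch of how such
a wrapper might use the new return value, assuming the existing cfb_*
and nouveau_fbcon_gpu_lockup() helpers:

    static void
    nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
    {
            int ret = -ENODEV;

            if (info->state == FBINFO_STATE_RUNNING &&
                !(info->flags & FBINFO_HWACCEL_DISABLED))
                    ret = nv50_fbcon_fillrect(info, rect);

            if (ret) {
                    /* -ENODEV means "can't accelerate", anything else
                     * means the channel wedged */
                    if (ret != -ENODEV)
                            nouveau_fbcon_gpu_lockup(info);
                    cfb_fillrect(info, rect);
            }
    }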
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c1..8dd04c5dac6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
static void
nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
/* We never schedule channel 0 or 127 */
for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+ if (dev_priv->channels.ptr[i] &&
+ dev_priv->channels.ptr[i]->ramfc) {
nv_wo32(cur, (nr * 4), i);
nr++;
}
@@ -60,7 +62,7 @@ static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[channel];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
uint32_t inst;
NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
NV_DEBUG(dev, "\n");
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->fifos[i])
+ if (dev_priv->channels.ptr[i])
nv50_fifo_channel_enable(dev, i);
else
nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
if (!pfifo->playlist[0])
return;
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+
nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
}
ramfc = chan->ramfc;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
/* This will ensure the channel is seen as disabled. */
nouveau_gpuobj_ref(chan->ramfc, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
nv50_fifo_channel_disable(dev, 127);
nv50_fifo_playlist_update(dev);
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->cache);
}
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 5);
+ nv50_vm_flush_engine(dev, 5);
}
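
nv50_fifo_destroy_context() above gains a quiesce/unload/restore
sequence so that a dying channel can never be the one PFIFO is currently
executing. In outline (the pfifo hooks are engine-vtable entries;
comments added for illustration):

    spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
    pfifo->reassign(dev, false);               /* stop channel switching */
    if (pfifo->channel_id(dev) == chan->id) {  /* are we live right now? */
            pfifo->disable(dev);
            pfifo->unload_context(dev);        /* evict our RAMFC state  */
            pfifo->enable(dev);
    }
    /* ... drop RAMFC, disable the channel, rewrite the playlist ... */
    pfifo->reassign(dev, true);
    spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);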
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2bf3d6..6b149c0cc06 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+ struct list_head handlers;
+ spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ bool inhibit;
+
+ struct dcb_gpio_entry *gpio;
+
+ void (*handler)(void *data, int state);
+ void *data;
+};
+
static int
nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
return 0;
}
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ struct dcb_gpio_entry *gpio;
+ unsigned long flags;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+ if (!gpioh)
+ return -ENOMEM;
+
+ INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+ gpioh->dev = dev;
+ gpioh->gpio = gpio;
+ gpioh->handler = handler;
+ gpioh->data = data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&gpioh->head, &priv->handlers);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
void
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
- u32 reg, mask;
+ unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio) {
- NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
+ if (!gpio)
return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+ if (gpioh->gpio != gpio ||
+ gpioh->handler != handler ||
+ gpioh->data != data)
+ continue;
+ list_del(&gpioh->head);
+ kfree(gpioh);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return false;
reg = gpio->line < 16 ? 0xe050 : 0xe070;
mask = 0x00010001 << (gpio->line & 0xf);
nv_wr32(dev, reg + 4, mask);
- nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->handlers);
+ spin_lock_init(&priv->lock);
+ pgpio->priv = priv;
+ return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ kfree(pgpio->priv);
+ pgpio->priv = NULL;
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+ int ret;
+
+ if (!pgpio->priv) {
+ ret = nv50_gpio_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev)
nv_wr32(dev, 0xe074, 0xffffffff);
}
+ nouveau_irq_register(dev, 21, nv50_gpio_isr);
return 0;
}
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nouveau_irq_unregister(dev, 21);
+
+ nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+ struct nv50_gpio_handler *gpioh =
+ container_of(work, struct nv50_gpio_handler, work);
+ struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ unsigned long flags;
+ int state;
+
+ state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+ if (state < 0)
+ return;
+
+ gpioh->handler(gpioh->data, state);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gpioh->inhibit = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo, ch;
+
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ ch = hi | lo;
+
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(gpioh, &priv->handlers, head) {
+ if (!(ch & (1 << gpioh->gpio->line)))
+ continue;
+
+ if (gpioh->inhibit)
+ continue;
+ gpioh->inhibit = true;
+
+ queue_work(dev_priv->wq, &gpioh->work);
+ }
+ spin_unlock(&priv->lock);
+}
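
The inhibit flag above guarantees at most one queued work item per
handler: the ISR sets it before queueing, and the bottom half clears it
only after the callback has run. A minimal consumer of the new API
(callback and tag variable are hypothetical, for illustration only):

    static void
    example_gpio_handler(void *data, int state)
    {
            struct drm_device *dev = data;

            NV_INFO(dev, "gpio line changed, state=%d\n", state);
    }

    /* handler runs from dev_priv->wq, never in the ISR itself */
    ret = nv50_gpio_irq_register(dev, tag, example_gpio_handler, dev);
    ...
    nv50_gpio_irq_unregister(dev, tag, example_gpio_handler, dev);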
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af61..2d7ea75a09d 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
static void
nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 12, nv50_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
nv_wr32(dev, 0x400138, 0xffffffff);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
nv50_graph_init_regs(dev);
- nv50_graph_init_intr(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
return ret;
+ ret = nv50_graph_register(dev);
+ if (ret)
+ return ret;
+ nv50_graph_init_intr(dev);
return 0;
}
@@ -158,6 +168,8 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nv_wr32(dev, 0x40013c, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev)
inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pgraph_refs);
return 0;
}
@@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin)
return;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+ atomic_dec(&chan->vm->pgraph_refs);
}
static int
@@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv50_graph_context_switch(struct drm_device *dev)
{
uint32_t inst;
@@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev)
}
static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct nouveau_gpuobj *gpuobj;
@@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
return -ERANGE;
@@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
chan->nvsw.vblsem_rval = data;
return 0;
}
static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
return -EINVAL;
- if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
- nv_wr32(dev, NV50_PDISPLAY_INTR_1,
- NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) |
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
- }
+ drm_vblank_get(dev, data);
+ chan->nvsw.vblsem_head = data;
list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
return 0;
}
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
- { 0x018c, nv50_graph_nvsw_dma_vblsem },
- { 0x0400, nv50_graph_nvsw_vblsem_offset },
- { 0x0404, nv50_graph_nvsw_vblsem_release_val },
- { 0x0408, nv50_graph_nvsw_vblsem_release },
- {}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct nouveau_page_flip_state s;
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
- { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
- { 0x0030, false, NULL }, /* null */
- { 0x5039, false, NULL }, /* m2mf */
- { 0x502d, false, NULL }, /* 2d */
- { 0x50c0, false, NULL }, /* compute */
- { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
- { 0x5097, false, NULL }, /* tesla (nv50) */
- { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
- { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
- { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
- {}
-};
+ if (!nouveau_finish_page_flip(chan, &s)) {
+ /* XXX - Do something here */
+ }
+
+ return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+ /* tesla */
+ if (dev_priv->chipset == 0x50)
+ NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+ else
+ if (dev_priv->chipset < 0xa0)
+ NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+ else {
+ switch (dev_priv->chipset) {
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ NVOBJ_CLASS(dev, 0x8397, GR);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ NVOBJ_CLASS(dev, 0x8597, GR);
+ break;
+ case 0xaf:
+ NVOBJ_CLASS(dev, 0x8697, GR);
+ break;
+ }
+ }
+
+ /* compute */
+ NVOBJ_CLASS(dev, 0x50c0, GR);
+ if (dev_priv->chipset > 0xa0 &&
+ dev_priv->chipset != 0xaa &&
+ dev_priv->chipset != 0xac)
+ NVOBJ_CLASS(dev, 0x85c0, GR);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
void
nv50_graph_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
}
void
@@ -449,8 +515,535 @@ nv86_graph_tlb_flush(struct drm_device *dev)
nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
}
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
+
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
+ { 3, "STACK_UNDERFLOW" },
+ { 4, "QUADON_ACTIVE" },
+ { 8, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x40, "BREAKPOINT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "IN" },
+ { 0x00000004, "OUT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
+ { 0x00000004, "INVALID_VALUE" },
+ { 0x00000005, "INVALID_ENUM" },
+ { 0x00000008, "INVALID_OBJECT" },
+ { 0x00000009, "READ_ONLY_OBJECT" },
+ { 0x0000000a, "SUPERVISOR_OBJECT" },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
+ { 0x0000000c, "INVALID_BITFIELD" },
+ { 0x0000000d, "BEGIN_END_ACTIVE" },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
+ { 0x00000010, "RT_DOUBLE_BIND" },
+ { 0x00000011, "RT_TYPES_MISMATCH" },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA" },
+ { 0x00000015, "FP_TOO_FEW_REGS" },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA" },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT" },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT" },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT" },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
+ { 0x0000001f, "RT_BPP128_WITH_MS8" },
+ { 0x00000021, "Z_OUT_OF_BOUNDS" },
+ { 0x00000023, "XY_OUT_OF_BOUNDS" },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
+ { 0x00000046, "LAYER_ID_NEEDS_GP" },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "COMPUTE_QUERY" },
+ { 0x00000010, "ILLEGAL_MTHD" },
+ { 0x00000020, "ILLEGAL_CLASS" },
+ { 0x00000040, "DOUBLE_NOTIFY" },
+ { 0x00001000, "CONTEXT_SWITCH" },
+ { 0x00010000, "BUFFER_NOTIFY" },
+ { 0x00100000, "DATA_ERROR" },
+ { 0x00200000, "TRAP" },
+ { 0x01000000, "SINGLE_STEP" },
+ {}
+};
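
Tables like these are data-driven decoders: nouveau_enum_print() walks
an array until it finds the matching value, nouveau_bitfield_print()
names each set bit. A minimal sketch of the enum variant (the real
helpers live elsewhere in the driver and may differ in detail):

    static void
    example_enum_print(const struct nouveau_enum *en, u32 value)
    {
            for (; en->name; en++) {
                    if (en->value == value) {
                            printk("%s", en->name);
                            return;
                    }
            }
            printk("(unknown 0x%08x)", value);
    }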
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+ ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_enum_print(nv50_mp_exec_error_names, status);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+ pc&0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ nv50_fb_vm_trap(dev, display, name);
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x00010000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x00010000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ nv50_fb_vm_trap(dev, display, name);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+ u32 status = nv_rd32(dev, 0x400108);
+ u32 ustatus;
+
+ if (!status && display) {
+ NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+ return 1;
+ }
+
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ nv_wr32(dev, 0x400500, 0x00000000);
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ u32 addr = nv_rd32(dev, 0x400808);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 datal = nv_rd32(dev, 0x40080c);
+ u32 datah = nv_rd32(dev, 0x400810);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 r848 = nv_rd32(dev, 0x400848);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x%08x "
+ "400808 0x%08x 400848 0x%08x\n",
+ chid, inst, subc, class, mthd, datah,
+ datal, addr, r848);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x400808, 0);
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+
+ if (ustatus & 0x00000002) {
+ u32 addr = nv_rd32(dev, 0x40084c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, 0x40085c);
+ u32 class = nv_rd32(dev, 0x400814);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x 40084c 0x%08x\n",
+ chid, inst, subc, class, mthd,
+ data, addr);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x40084c, 0);
+ ustatus &= ~0x00000002;
+ }
+
+ if (ustatus && display) {
+ NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+ "0x%08x)\n", ustatus);
+ }
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ if (!status)
+ return 0;
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+ nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+ nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+ }
+
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+ nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ " %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+ }
+
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+ /* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH - TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH - TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH - TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+ nv_wr32(dev, 0x400108, status);
+ }
+
+ return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, 0x400100))) {
+ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+ u32 chid = nv50_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+ mthd, data))
+ show &= ~0x00000010;
+ }
+
+ if (stat & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, 0x400100, 0x00001000);
+ nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+ nv50_graph_context_switch(dev);
+ stat &= ~0x00001000;
+ show &= ~0x00001000;
+ }
+
+ show = (show && nouveau_ratelimit()) ? show : 0;
+
+ if (show & 0x00100000) {
+ u32 ecode = nv_rd32(dev, 0x400110);
+ NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+ nouveau_enum_print(nv50_data_error_names, ecode);
+ printk("\n");
+ }
+
+ if (stat & 0x00200000) {
+ if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+ show &= ~0x00200000;
+ }
+
+ nv_wr32(dev, 0x400100, stat);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ if (show) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv50_graph_intr, show);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
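
nouveau_irq_register(dev, 12, nv50_graph_isr) ties the handler to PMC
interrupt status bit 12; the top-level dispatcher presumably fans out
one callback per set bit, along these lines (assumed shape, for
illustration -- the real dispatcher is in nouveau_irq.c, outside this
section):

    static void (*handler[32])(struct drm_device *);

    static irqreturn_t
    example_irq_dispatch(int irq, void *arg)
    {
            struct drm_device *dev = arg;
            u32 stat = nv_rd32(dev, NV03_PMC_INTR_0);
            int i;

            if (!stat)
                    return IRQ_NONE;

            for (i = 0; stat && i < 32; i++) {
                    if (!(stat & (1 << i)) || !handler[i])
                            continue;
                    handler[i](dev);
                    stat &= ~(1 << i);
            }
            return IRQ_HANDLED;
    }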
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b764..2e1b1cd19a4 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
struct nv50_instmem_priv {
uint32_t save1700[5]; /* 0x1700->0x1710 */
- struct nouveau_gpuobj *pramin_pt;
- struct nouveau_gpuobj *pramin_bar;
- struct nouveau_gpuobj *fb_bar;
+ struct nouveau_gpuobj *bar1_dmaobj;
+ struct nouveau_gpuobj *bar3_dmaobj;
};
static void
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
return;
nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan)
}
static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
struct nouveau_channel *chan;
- int ret;
+ int ret, i;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size,
return ret;
}
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+ }
+
+ ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
chan->ramin->pinst + fc,
chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv;
struct nouveau_channel *chan;
+ struct nouveau_vm *vm;
int ret, i;
u32 tmp;
@@ -127,112 +146,87 @@ nv50_instmem_init(struct drm_device *dev)
ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
if (ret) {
NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
+ goto error;
}
- /* we need a channel to plug into the hw to control the BARs */
- ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+ /* BAR3 */
+ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+ &dev_priv->bar3_vm);
if (ret)
- return ret;
- chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+ goto error;
- /* allocate page table for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->pramin_pt);
+ ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
if (ret)
- return ret;
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
- nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
- nv_wo32(chan->vm_pd, 0x0004, 0);
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
- /* DMA object for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+ ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
if (ret)
- return ret;
- nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
- nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
-
- /* map channel into PRAMIN, gpuobj didn't do it for us */
- ret = nv50_instmem_bind(dev, chan->ramin);
+ goto error;
+ dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar3_dmaobj);
if (ret)
- return ret;
+ goto error;
- /* poke regs... */
nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
- tmp = nv_ri32(dev, 0);
- nv_wi32(dev, 0, ~tmp);
- if (nv_ri32(dev, 0) != ~tmp) {
- NV_ERROR(dev, "PRAMIN readback failed\n");
- return -EIO;
- }
- nv_wi32(dev, 0, tmp);
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
+ dev_priv->engine.instmem.flush(dev);
dev_priv->ramin_available = true;
- /* Determine VM layout */
- dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
- dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
- dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = dev_priv->vram_size;
- if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
- dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
- dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
- dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
- dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
- NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
- dev_priv->vm_gart_base,
- dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
- NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
- dev_priv->vm_vram_base,
- dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
- /* VRAM page table(s), mapped into VM at +1GiB */
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
- 0, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->vm_vram_pt[i]);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
- dev_priv->vm_vram_pt_nr = i;
- return ret;
- }
- dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
- nv_wo32(chan->vm_pd, 0x10 + (i*8),
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
+ tmp = nv_ro32(chan->ramin, 0);
+ nv_wo32(chan->ramin, 0, ~tmp);
+ if (nv_ro32(chan->ramin, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ ret = -EIO;
+ goto error;
}
+ nv_wo32(chan->ramin, 0, tmp);
- /* DMA object for FB BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
+ /* BAR1 */
+ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
if (ret)
- return ret;
- nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
- pci_resource_len(dev->pdev, 1) - 1);
- nv_wo32(priv->fb_bar, 0x08, 0x40000000);
- nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
- nv_wo32(priv->fb_bar, 0x10, 0x00000000);
- nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+ goto error;
- dev_priv->engine.instmem.flush(dev);
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar1_dmaobj);
+ if (ret)
+ goto error;
- nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+ /* Create shared channel VM, space is reserved at the beginning
+ * to catch "NULL pointer" references
+ */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &dev_priv->chan_vm);
+ if (ret)
+ goto error;
+
return 0;
+
+error:
+ nv50_instmem_takedown(dev);
+ return ret;
}
void
@@ -240,7 +234,7 @@ nv50_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@ nv50_instmem_takedown(struct drm_device *dev)
dev_priv->ramin_available = false;
- /* Restore state from before init */
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
- nouveau_gpuobj_ref(NULL, &priv->fb_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
- /* Destroy dummy channel */
- if (chan) {
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
- dev_priv->vm_vram_pt_nr = 0;
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+ dev_priv->channels.ptr[127] = NULL;
+ nv50_channel_del(&dev_priv->channels.ptr[0]);
- nv50_channel_del(&dev_priv->fifos[0]);
- dev_priv->fifos[127] = NULL;
- }
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+ if (dev_priv->ramin_heap.free_stack.next)
+ drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
kfree(priv);
@@ -276,16 +270,8 @@ int
nv50_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
- int i;
- ramin->im_backing_suspend = vmalloc(ramin->size);
- if (!ramin->im_backing_suspend)
- return -ENOMEM;
-
- for (i = 0; i < ramin->size; i += 4)
- ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+ dev_priv->ramin_available = false;
return 0;
}
@@ -294,146 +280,121 @@ nv50_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
- dev_priv->ramin_available = false;
- dev_priv->ramin_base = ~0;
- for (i = 0; i < ramin->size; i += 4)
- nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
- dev_priv->ramin_available = true;
- vfree(ramin->im_backing_suspend);
- ramin->im_backing_suspend = NULL;
-
/* Poke the relevant regs, and pray it works :) */
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
nv_wr32(dev, NV50_PUNK_UNK1710, 0);
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ dev_priv->ramin_available = true;
}
+struct nv50_gpuobj_node {
+ struct nouveau_vram *vram;
+ struct nouveau_vma chan_vma;
+ u32 align;
+};
+
int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_device *dev = gpuobj->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node = NULL;
int ret;
- if (gpuobj->im_backing)
- return -EINVAL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->align = align;
- *sz = ALIGN(*sz, 4096);
- if (*sz == 0)
- return -EINVAL;
+ size = (size + 4095) & ~4095;
+ align = max(align, (u32)4096);
- ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
+ ret = vram->get(dev, size, align, 0, 0, &node->vram);
if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ kfree(node);
return ret;
}
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
+ gpuobj->vinst = node->vram->offset;
+
+ if (gpuobj->flags & NVOBJ_FLAG_VM) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
+ NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+ &node->chan_vma);
+ if (ret) {
+ vram->put(dev, &node->vram);
+ kfree(node);
+ return ret;
+ }
+
+ nouveau_vm_map(&node->chan_vma, node->vram);
+ gpuobj->vinst = node->chan_vma.offset;
}
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ gpuobj->size = size;
+ gpuobj->node = node;
return 0;
}
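
The get/put (and map/unmap below) hooks replace the old
populate/clear/bind/unbind quartet. Assuming the generic gpuobj code
drives them in the usual order, an allocation now reduces to roughly
(caller-side sketch, not part of this hunk):

    ret = instmem->get(gpuobj, size, align);           /* carve VRAM      */
    if (ret == 0 && !(gpuobj->flags & NVOBJ_FLAG_DONT_MAP))
            ret = instmem->map(gpuobj);                /* BAR3 CPU window */
    ...
    instmem->unmap(gpuobj);
    instmem->put(gpuobj);                              /* VRAM + chan VM  */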
void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node;
+
+ node = gpuobj->node;
+ gpuobj->node = NULL;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
+ if (node->chan_vma.node) {
+ nouveau_vm_unmap(&node->chan_vma);
+ nouveau_vm_put(&node->chan_vma);
}
+ vram->put(dev, &node->vram);
+ kfree(node);
}
int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- vram = gpuobj->vinst;
-
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
- vram |= 1;
- if (dev_priv->vram_sys_base) {
- vram += dev_priv->vram_sys_base;
- vram |= 0x30;
- }
-
- while (pte < pte_end) {
- nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
- nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
- vram += 0x1000;
- pte += 2;
- }
- dev_priv->engine.instmem.flush(dev);
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct nv50_gpuobj_node *node = gpuobj->node;
+ int ret;
- nv50_vm_flush(dev, 6);
+ ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+ NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+ if (ret)
+ return ret;
- gpuobj->im_bound = 1;
+ nouveau_vm_map(&node->vram->bar_vma, node->vram);
+ gpuobj->pinst = node->vram->bar_vma.offset;
return 0;
}
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- /* can happen during late takedown */
- if (unlikely(!dev_priv->ramin_available))
- return 0;
+ struct nv50_gpuobj_node *node = gpuobj->node;
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
- while (pte < pte_end) {
- nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
- nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
- pte += 2;
+ if (node->vram->bar_vma.node) {
+ nouveau_vm_unmap(&node->vram->bar_vma);
+ nouveau_vm_put(&node->vram->bar_vma);
}
- dev_priv->engine.instmem.flush(dev);
-
- gpuobj->im_bound = 0;
- return 0;
}
void
@@ -452,11 +413,3 @@ nv84_instmem_flush(struct drm_device *dev)
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
- nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
- NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 00000000000..38e523e1099
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2])
+{
+ struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+ u64 phys = 0xdeadcafe00000000ULL;
+ u32 coverage = 0;
+
+ if (pgt[0]) {
+ phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+ coverage = (pgt[0]->size >> 3) << 12;
+ } else
+ if (pgt[1]) {
+ phys = 0x00000001 | pgt[1]->vinst; /* present */
+ coverage = (pgt[1]->size >> 3) << 16;
+ }
+
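+ /* the 0x60/0x40/0x20 bits below appear to tell the MMU how much
+ * address space the page table spans, judging from the coverage
+ * thresholds.
+ */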
+ if (phys & 1) {
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
+ }
+
+ if (coverage <= 32 * 1024 * 1024)
+ phys |= 0x60;
+ else if (coverage <= 64 * 1024 * 1024)
+ phys |= 0x40;
+ else if (coverage < 128 * 1024 * 1024)
+ phys |= 0x20;
+ }
+
+ nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u64 phys, u32 memtype, u32 target)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
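+ /* assemble a PTE: bit 0 present, bit 3 set for mappings without
+ * write access, bits 4-5 target, bit 6 system access, bits 40+
+ * memory type.
+ */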
+ phys |= 1; /* present */
+ phys |= (u64)memtype << 40;
+
+ /* IGPs don't have real VRAM; re-target to stolen system memory */
+ if (target == 0 && dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys |= target << 4;
+
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= (1 << 6);
+
+ if (!(vma->access & NV_MEM_ACCESS_WO))
+ phys |= (1 << 3);
+
+ return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 block;
+ int i;
+
+ phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ pte <<= 3;
+ cnt <<= 3;
+
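+ /* write PTEs in the largest naturally-aligned power-of-two runs
+ * possible, encoding the run size into bits 7-9 of the low word;
+ * presumably a contiguity hint for the MMU.
+ */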
+ while (cnt) {
+ u32 offset_h = upper_32_bits(phys);
+ u32 offset_l = lower_32_bits(phys);
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 3);
+ if (cnt >= block && !(pte & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
+
+ phys += block << (vma->node->type - 3);
+ cnt -= block;
+
+ while (block) {
+ nv_wo32(pgt, pte + 0, offset_l);
+ nv_wo32(pgt, pte + 4, offset_h);
+ pte += 8;
+ block -= 8;
+ }
+ }
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
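+ /* flush pending instmem writes before touching any TLBs; the BAR
+ * VMs only need engine 6 flushed, while the channel VM flushes
+ * PFIFO plus whichever engines still hold references to it.
+ */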
+ pinstmem->flush(vm->dev);
+
+ /* BAR */
+ if (vm != dev_priv->chan_vm) {
+ nv50_vm_flush_engine(vm->dev, 6);
+ return;
+ }
+
+ pfifo->tlb_flush(vm->dev);
+
+ if (atomic_read(&vm->pgraph_refs))
+ pgraph->tlb_flush(vm->dev);
+ if (atomic_read(&vm->pcrypt_refs))
+ pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 00000000000..58e98ad3634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
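+/* maps a tile_flags memtype to the allocation type handed to
+ * nouveau_mm_get(); 0 marks the memtype invalid, and the 1/2 split
+ * presumably separates normal from compression-capable layouts.
+ */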
+static int types[0x80] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+ if (likely(type < ARRAY_SIZE(types) && types[type]))
+ return true;
+ return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *this;
+ struct nouveau_vram *vram;
+
+ vram = *pvram;
+ *pvram = NULL;
+ if (unlikely(vram == NULL))
+ return;
+
+ mutex_lock(&mm->mutex);
+ while (!list_empty(&vram->regions)) {
+ this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+ list_del(&this->rl_entry);
+ nouveau_mm_put(mm, this);
+ }
+ mutex_unlock(&mm->mutex);
+
+ kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ if (!types[type])
+ return -EINVAL;
+ size >>= 12;
+ align >>= 12;
+ size_nc >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
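+ /* count the enabled memory partitions, then reconstruct the DRAM
+ * row size from the column bits, bank count and partition count so
+ * it can be sanity-checked against the reported VRAM size.
+ */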
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ULL;
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+ dev_priv->vram_rblock_size = 4096;
+ break;
+ default:
+ dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 00000000000..ec18ae1c388
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ &chan->crypt_ctx);
+ if (ret)
+ return ret;
+
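+ /* these ramin words appear to describe the crypt context as a DMA
+ * object: class/flags, then the limit (vinst + 0xff, i.e. 256 bytes)
+ * and base addresses.
+ */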
+ nv_wo32(ramin, 0xa0, 0x00190000);
+ nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+ nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+ nv_wo32(ramin, 0xac, 0);
+ nv_wo32(ramin, 0xb0, 0);
+ nv_wo32(ramin, 0xb4, 0);
+
+ dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pcrypt_refs);
+ return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ u32 inst;
+
+ if (!chan->crypt_ctx)
+ return;
+
+ inst = (chan->ramin->vinst >> 12);
+ inst |= 0x80000000;
+
+ /* mark the context as invalid if it's still on the hardware; not
+ * doing this causes issues the next time PCRYPT is used,
+ * unsurprisingly :)
+ */
+ nv_wr32(dev, 0x10200c, 0x00000000);
+ if (nv_rd32(dev, 0x102188) == inst)
+ nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+ if (nv_rd32(dev, 0x10218c) == inst)
+ nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+ nv_wr32(dev, 0x10200c, 0x00000010);
+
+ nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+ atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ if (!pcrypt->registered) {
+ NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+ pcrypt->registered = true;
+ }
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ nouveau_irq_register(dev, 14, nv84_crypt_isr);
+ nv_wr32(dev, 0x102130, 0xffffffff);
+ nv_wr32(dev, 0x102140, 0xffffffbf);
+
+ nv_wr32(dev, 0x10200c, 0x00000010);
+ return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x102140, 0x00000000);
+ nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x102130);
+ u32 mthd = nv_rd32(dev, 0x102190);
+ u32 data = nv_rd32(dev, 0x102194);
+ u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+ int show = nouveau_ratelimit();
+
+ if (show) {
+ NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ stat, mthd, data, inst);
+ }
+
+ nv_wr32(dev, 0x102130, stat);
+ nv_wr32(dev, 0x10200c, 0x10);
+
+ nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 00000000000..fa5d4c23438
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
+
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 1);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING (chan, rect->color);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ OUT_RING (chan, rect->dx);
+ OUT_RING (chan, rect->dy);
+ OUT_RING (chan, rect->dx + rect->width);
+ OUT_RING (chan, rect->dy + rect->height);
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ }
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ OUT_RING (chan, region->dx);
+ OUT_RING (chan, region->dy);
+ OUT_RING (chan, region->width);
+ OUT_RING (chan, region->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sy);
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ uint32_t *palette = info->pseudo_palette;
+ int ret;
+
+ if (image->depth != 1)
+ return -ENODEV;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ width = ALIGN(image->width, 32);
+ dwords = (width * image->height) >> 5;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ OUT_RING (chan, palette[image->bg_color] | mask);
+ OUT_RING (chan, palette[image->fg_color] | mask);
+ } else {
+ OUT_RING (chan, image->bg_color);
+ OUT_RING (chan, image->fg_color);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ OUT_RING (chan, image->width);
+ OUT_RING (chan, image->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dy);
+
+ while (dwords) {
+ int push = dwords > 2047 ? 2047 : dwords;
+
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
+
+ dwords -= push;
+
+ BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ OUT_RINGp(chan, data, push);
+ data += push;
+ }
+
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ int ret, format;
+
+ ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+ if (ret)
+ return ret;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = 0xf3;
+ break;
+ case 15:
+ format = 0xf8;
+ break;
+ case 16:
+ format = 0xe8;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32, just use 24.. */
+ format = 0xe6;
+ break;
+ case 2: /* depth 30 */
+ format = 0xd1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = RING_SPACE(chan, 60);
+ if (ret) {
+ WARN_ON(1);
+ nouveau_fbcon_gpu_lockup(info);
+ return ret;
+ }
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ OUT_RING (chan, 0x0000902d);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ OUT_RING (chan, 0x55);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ OUT_RING (chan, 4);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ OUT_RING (chan, 2);
+ OUT_RING (chan, 1);
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ FIRE_RING (chan);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b95fbc..e6f92c541db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+ struct nouveau_vma user_vma;
+ int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+ struct nouveau_bo *user;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
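+ /* rebuild the playlist in the inactive buffer: one 8-byte entry per
+ * channel whose enable bit is set, then point PFIFO at the result.
+ */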
+ for (i = 0, p = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
void
nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@ nvc0_fifo_channel_id(struct drm_device *dev)
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_chan *fifoch;
+ u64 ib_virt, user_vinst;
+ int ret;
+
+ chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
+ if (!chan->fifo_priv)
+ return -ENOMEM;
+ fifoch = chan->fifo_priv;
+
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &fifoch->user);
+ if (ret)
+ goto error;
+
+ ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_map(fifoch->user);
+ if (ret) {
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ fifoch->user->bo.mem.mm_node);
+
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user_vma.offset + (chan->id * 0x1000),
+ PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+
+ /* zero channel regs */
+ nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
+
+ /* ramfc */
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+ chan->ramin->vinst, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
+ nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
+ nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
+ nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
+ nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
+ nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
+ nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
+ nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
+ nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+ (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(dev);
return 0;
+
+error:
+ pfifo->destroy_context(chan);
+ return ret;
}
void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct nvc0_fifo_chan *fifoch;
+
+ nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+
+ nvc0_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ fifoch = chan->fifo_priv;
+ chan->fifo_priv = NULL;
+ if (!fifoch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
+ if (fifoch->user) {
+ nouveau_bo_unmap(fifoch->user);
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ }
+ kfree(fifoch);
}
int
@@ -77,14 +243,213 @@ nvc0_fifo_unload_context(struct drm_device *dev)
return 0;
}
+static void
+nvc0_fifo_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+
+ priv = pfifo->priv;
+ if (!priv)
+ return;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ kfree(priv);
+}
+
void
nvc0_fifo_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, 0x002140, 0x00000000);
+ nvc0_fifo_destroy(dev);
+}
+
+static int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfifo->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ goto error;
+
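+ /* reserve one 4KiB page of BAR1 address space per channel;
+ * create_context maps each channel's control page into its slot.
+ */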
+ ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ return 0;
+
+error:
+ nvc0_fifo_destroy(dev);
+ return ret;
}
int
nvc0_fifo_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret, i;
+
+ if (!pfifo->priv) {
+ ret = nvc0_fifo_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pfifo->priv;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+ nv_wr32(dev, 0x002204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < 3; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
return 0;
}
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+ { 0, "PGRAPH" },
+ { 3, "PEEPHOLE" },
+ { 4, "BAR1" },
+ { 5, "BAR3" },
+ { 7, "PFIFO" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+ { 0, "PT_NOT_PRESENT" },
+ { 1, "PT_TOO_SHORT" },
+ { 2, "PAGE_NOT_PRESENT" },
+ { 3, "VM_LIMIT_EXCEEDED" },
+ {}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ }
+
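+ /* XXX: masks every PFIFO interrupt once the first one fires;
+ * presumably a safety measure while the ISR is incomplete.
+ */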
+ nv_wr32(dev, 0x2140, 0);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a5177a8d..5feacd5d5fa 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,16 @@
* Authors: Ben Skeggs
*/
+#include <linux/firmware.h>
+
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
+static void nvc0_graph_isr(struct drm_device *);
+static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
void
nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +44,735 @@ nvc0_graph_channel(struct drm_device *dev)
return NULL;
}
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
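+ /* generate the "golden" context image once on this channel and cache
+ * it, so later channels can be initialised from the saved copy.
+ */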
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nvc0_grctx_generate(chan);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+}
+
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i = 0, gpc, tp, ret;
+ u32 magic;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+
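+ /* the mmio list is a series of (register, value) pairs, presumably
+ * replayed by the context ucode on channel switch; mmio_nr holds the
+ * pair count.
+ */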
+ nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+ nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ }
+ }
+
+ grch->mmio_nr = i / 2;
+ return 0;
+}
+
int
nvc0_graph_create_context(struct nouveau_channel *chan)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ struct nvc0_graph_chan *grch;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!chan->pgraph_ctx)
+ return -ENOMEM;
+ grch = chan->pgraph_ctx;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ chan->ramin_grctx = grch->grctx;
+ grctx = grch->grctx;
+
+ ret = nvc0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nvc0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ pinstmem->flush(dev);
return 0;
+
+error:
+ pgraph->destroy_context(chan);
+ return ret;
}
void
nvc0_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct nvc0_graph_chan *grch;
+
+ grch = chan->pgraph_ctx;
+ chan->pgraph_ctx = NULL;
+ if (!grch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->ramin_grctx = NULL;
}
int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
return 0;
}
int
nvc0_graph_unload_context(struct drm_device *dev)
{
- return 0;
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ return nvc0_graph_unload_context_to(dev, inst);
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+
+ priv = pgraph->priv;
+ if (!priv)
+ return;
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals); /* kfree(NULL) is a no-op */
+ kfree(priv);
}
void
nvc0_graph_takedown(struct drm_device *dev)
{
+ nvc0_graph_destroy(dev);
+}
+
+static int
+nvc0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret, gpc, i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pgraph->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tp_total += priv->tp_nr[gpc];
+ }
+
+ /*XXX: these need figuring out... */
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+ priv->magic_not_rop_nr = 0x07;
+ /* filled values up to tp_total, the rest 0 */
+ priv->magicgpc980[0] = 0x22111000;
+ priv->magicgpc980[1] = 0x00000233;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x000ba2e9;
+ } else
+ if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+ priv->magic_not_rop_nr = 0x05;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x00233222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00092493;
+ } else
+ if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+ priv->magic_not_rop_nr = 0x06;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x03332222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00088889;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x01;
+ priv->magicgpc980[0] = 0x02321100;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00124925;
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+ priv->tp_nr[3], priv->rop_nr);
+ /* use 0xc3's values... */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ }
+
+ nouveau_irq_register(dev, 12, nvc0_graph_isr);
+ NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+ NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+ return 0;
+
+error:
+ nvc0_graph_destroy(dev);
+ return ret;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x00006fe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x013901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc;
+
+ /*
+ *       TP      ROP UNKVAL(magic_not_rop_nr)
+ * 450:  4/0/0/0  2  3
+ * 460:  3/4/0/0  4  1
+ * 465:  3/4/4/0  4  7
+ * 470:  3/3/4/4  5  5
+ * 480:  3/4/4/4  6  6
+ *
+ * magicgpc918
+ * 450: 00200000 00000000001000000000000000000000
+ * 460: 00124925 00000000000100100100100100100101
+ * 465: 000ba2e9 00000000000010111010001011101001
+ * 470: 00092493 00000000000010010010010010010011
+ * 480: 00088889 00000000000010001000100010001001
+ *
+ * magicgpc980: filled values up to tp_total, remainder 0
+ * 450: 00003210 00000000 00000000 00000000
+ * 460: 02321100 00000000 00000000 00000000
+ * 465: 22111000 00000233 00000000 00000000
+ * 470: 11110000 00233222 00000000 00000000
+ * 480: 11110000 03332222 00000000 00000000
+ */
+
+ nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+}
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+ nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+ nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x40601c, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc, tp;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static int
+nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
+ const char *code_fw, const char *data_fw)
+{
+ const struct firmware *fw;
+ char name[32];
+ int ret, i;
+
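+ /* upload the fuc data segment through the 0x1c0/0x1c4 window, then
+ * the code segment through 0x180/0x184, writing a new 256-byte page
+ * index to 0x188 every 64 words.
+ */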
+ snprintf(name, sizeof(name), "nouveau/%s", data_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", data_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
+ release_firmware(fw);
+
+ snprintf(name, sizeof(name), "nouveau/%s", code_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", code_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
+ }
+ release_firmware(fw);
+
+ return 0;
+}
+
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ u32 r000260;
+ int ret;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
+ if (ret == 0)
+ ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
+ nv_wr32(dev, 0x000260, r000260);
+
+ if (ret)
+ return ret;
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
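+ /* 0x409504 looks to be a request mailbox to the HUB ucode: 0x21
+ * below gets no reply, 0x10 answers with the context size in
+ * 0x409800, and 0x16/0x25 are unidentified setup requests.
+ */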
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
int
nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret;
+
dev_priv->engine.graph.accel_blocked = true;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ break;
+ default:
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ if (nouveau_noaccel != 0)
+ return 0;
+ break;
+ }
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ if (!pgraph->priv) {
+ ret = nvc0_graph_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgraph->priv;
+
+ nvc0_graph_init_obj418880(dev);
+ nvc0_graph_init_regs(dev);
+ /*nvc0_graph_init_unimplemented_magics(dev);*/
+ nvc0_graph_init_gpc_0(dev);
+ /*nvc0_graph_init_unimplemented_c242(dev);*/
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nvc0_graph_init_units(dev);
+ nvc0_graph_init_gpc_1(dev);
+ nvc0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nvc0_graph_init_ctxctl(dev);
+ if (ret == 0)
+ dev_priv->engine.graph.accel_blocked = false;
return 0;
}
+static int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nvc0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ u32 trap = nv_rd32(dev, 0x400108);
+ NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+ nv_wr32(dev, 0x409c20, ustat);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 00000000000..40e26f9c56c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
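+/*
+ * Per-unit register windows: each GPC has a 0x8000 stride at 0x500000
+ * (with an 0x800 stride per TP inside it) and each ROP a 0x400 stride
+ * at 0x410000; the _BCAST ranges mirror a write to every unit at once.
+ */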
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
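+/*
+ * Per-device PGRAPH state: unit counts probed at startup, the golden
+ * context image used to seed new channels, and assorted values whose
+ * meaning is not yet understood (hence the magic/unk names).
+ */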
+struct nvc0_graph_priv {
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tp_nr[GPC_MAX];
+ u8 tp_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+ u32 magicgpc980[4];
+ u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 00000000000..b9e68b2d30a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
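+/*
+ * Issue a single init command to PGRAPH: write the payload, kick the
+ * command, then busy-wait until the status bit in 0x400700 clears.
+ */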
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
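+/*
+ * Inject one method call for the given object class via the PGRAPH
+ * method submission registers at 0x404488/0x40448c.
+ */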
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
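+/*
+ * Default method state for the 0x9097 (Fermi 3D) class, replayed from
+ * an mmio trace.
+ */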
+static void
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
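+/* Default method state for the 0x902d (Fermi 2D) class. */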
+ nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+ nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+ nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+ /* in trace, right after 0x90c0, not here */
+ nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+ nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, 0x404004, 0x00000000);
+ nv_wr32(dev, 0x404008, 0x00000000);
+ nv_wr32(dev, 0x40400c, 0x00000000);
+ nv_wr32(dev, 0x404010, 0x00000000);
+ nv_wr32(dev, 0x404014, 0x00000000);
+ nv_wr32(dev, 0x404018, 0x00000000);
+ nv_wr32(dev, 0x40401c, 0x00000000);
+ nv_wr32(dev, 0x404020, 0x00000000);
+ nv_wr32(dev, 0x404024, 0x00000000);
+ nv_wr32(dev, 0x404028, 0x00000000);
+ nv_wr32(dev, 0x40402c, 0x00000000);
+ nv_wr32(dev, 0x404044, 0x00000000);
+ nv_wr32(dev, 0x404094, 0x00000000);
+ nv_wr32(dev, 0x404098, 0x00000000);
+ nv_wr32(dev, 0x40409c, 0x00000000);
+ nv_wr32(dev, 0x4040a0, 0x00000000);
+ nv_wr32(dev, 0x4040a4, 0x00000000);
+ nv_wr32(dev, 0x4040a8, 0x00000000);
+ nv_wr32(dev, 0x4040ac, 0x00000000);
+ nv_wr32(dev, 0x4040b0, 0x00000000);
+ nv_wr32(dev, 0x4040b4, 0x00000000);
+ nv_wr32(dev, 0x4040b8, 0x00000000);
+ nv_wr32(dev, 0x4040bc, 0x00000000);
+ nv_wr32(dev, 0x4040c0, 0x00000000);
+ nv_wr32(dev, 0x4040c4, 0x00000000);
+ nv_wr32(dev, 0x4040c8, 0xf0000087);
+ nv_wr32(dev, 0x4040d4, 0x00000000);
+ nv_wr32(dev, 0x4040d8, 0x00000000);
+ nv_wr32(dev, 0x4040dc, 0x00000000);
+ nv_wr32(dev, 0x4040e0, 0x00000000);
+ nv_wr32(dev, 0x4040e4, 0x00000000);
+ nv_wr32(dev, 0x4040e8, 0x00001000);
+ nv_wr32(dev, 0x4040f8, 0x00000000);
+ nv_wr32(dev, 0x404130, 0x00000000);
+ nv_wr32(dev, 0x404134, 0x00000000);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x0000002e);
+ nv_wr32(dev, 0x404154, 0x00000400);
+ nv_wr32(dev, 0x404158, 0x00000200);
+ nv_wr32(dev, 0x404164, 0x00000055);
+ nv_wr32(dev, 0x404168, 0x00000000);
+ nv_wr32(dev, 0x404174, 0x00000000);
+ nv_wr32(dev, 0x404178, 0x00000000);
+ nv_wr32(dev, 0x40417c, 0x00000000);
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x00000000);
+ nv_wr32(dev, 0x404408, 0x00000000);
+ nv_wr32(dev, 0x40440c, 0x00000000);
+ nv_wr32(dev, 0x404410, 0x00000000);
+ nv_wr32(dev, 0x404414, 0x00000000);
+ nv_wr32(dev, 0x404418, 0x00000000);
+ nv_wr32(dev, 0x40441c, 0x00000000);
+ nv_wr32(dev, 0x404420, 0x00000000);
+ nv_wr32(dev, 0x404424, 0x00000000);
+ nv_wr32(dev, 0x404428, 0x00000000);
+ nv_wr32(dev, 0x40442c, 0x00000000);
+ nv_wr32(dev, 0x404430, 0x00000000);
+ nv_wr32(dev, 0x404434, 0x00000000);
+ nv_wr32(dev, 0x404438, 0x00000000);
+ nv_wr32(dev, 0x404460, 0x00000000);
+ nv_wr32(dev, 0x404464, 0x00000000);
+ nv_wr32(dev, 0x404468, 0x00ffffff);
+ nv_wr32(dev, 0x40446c, 0x00000000);
+ nv_wr32(dev, 0x404480, 0x00000001);
+ nv_wr32(dev, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x00000015);
+ nv_wr32(dev, 0x404608, 0x00000000);
+ nv_wr32(dev, 0x40460c, 0x00002e00);
+ nv_wr32(dev, 0x404610, 0x00000100);
+ nv_wr32(dev, 0x404618, 0x00000000);
+ nv_wr32(dev, 0x40461c, 0x00000000);
+ nv_wr32(dev, 0x404620, 0x00000000);
+ nv_wr32(dev, 0x404624, 0x00000000);
+ nv_wr32(dev, 0x404628, 0x00000000);
+ nv_wr32(dev, 0x40462c, 0x00000000);
+ nv_wr32(dev, 0x404630, 0x00000000);
+ nv_wr32(dev, 0x404634, 0x00000000);
+ nv_wr32(dev, 0x404638, 0x00000004);
+ nv_wr32(dev, 0x40463c, 0x00000000);
+ nv_wr32(dev, 0x404640, 0x00000000);
+ nv_wr32(dev, 0x404644, 0x00000000);
+ nv_wr32(dev, 0x404648, 0x00000000);
+ nv_wr32(dev, 0x40464c, 0x00000000);
+ nv_wr32(dev, 0x404650, 0x00000000);
+ nv_wr32(dev, 0x404654, 0x00000000);
+ nv_wr32(dev, 0x404658, 0x00000000);
+ nv_wr32(dev, 0x40465c, 0x007f0100);
+ nv_wr32(dev, 0x404660, 0x00000000);
+ nv_wr32(dev, 0x404664, 0x00000000);
+ nv_wr32(dev, 0x404668, 0x00000000);
+ nv_wr32(dev, 0x40466c, 0x00000000);
+ nv_wr32(dev, 0x404670, 0x00000000);
+ nv_wr32(dev, 0x404674, 0x00000000);
+ nv_wr32(dev, 0x404678, 0x00000000);
+ nv_wr32(dev, 0x40467c, 0x00000002);
+ nv_wr32(dev, 0x404680, 0x00000000);
+ nv_wr32(dev, 0x404684, 0x00000000);
+ nv_wr32(dev, 0x404688, 0x00000000);
+ nv_wr32(dev, 0x40468c, 0x00000000);
+ nv_wr32(dev, 0x404690, 0x00000000);
+ nv_wr32(dev, 0x404694, 0x00000000);
+ nv_wr32(dev, 0x404698, 0x00000000);
+ nv_wr32(dev, 0x40469c, 0x00000000);
+ nv_wr32(dev, 0x4046a0, 0x007f0080);
+ nv_wr32(dev, 0x4046a4, 0x00000000);
+ nv_wr32(dev, 0x4046a8, 0x00000000);
+ nv_wr32(dev, 0x4046ac, 0x00000000);
+ nv_wr32(dev, 0x4046b0, 0x00000000);
+ nv_wr32(dev, 0x4046b4, 0x00000000);
+ nv_wr32(dev, 0x4046b8, 0x00000000);
+ nv_wr32(dev, 0x4046bc, 0x00000000);
+ nv_wr32(dev, 0x4046c0, 0x00000000);
+ nv_wr32(dev, 0x4046c4, 0x00000000);
+ nv_wr32(dev, 0x4046c8, 0x00000000);
+ nv_wr32(dev, 0x4046cc, 0x00000000);
+ nv_wr32(dev, 0x4046d0, 0x00000000);
+ nv_wr32(dev, 0x4046d4, 0x00000000);
+ nv_wr32(dev, 0x4046d8, 0x00000000);
+ nv_wr32(dev, 0x4046dc, 0x00000000);
+ nv_wr32(dev, 0x4046e0, 0x00000000);
+ nv_wr32(dev, 0x4046e4, 0x00000000);
+ nv_wr32(dev, 0x4046e8, 0x00000000);
+ nv_wr32(dev, 0x4046f0, 0x00000000);
+ nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x00000000);
+ nv_wr32(dev, 0x404704, 0x00000000);
+ nv_wr32(dev, 0x404708, 0x00000000);
+ nv_wr32(dev, 0x40470c, 0x00000000);
+ nv_wr32(dev, 0x404710, 0x00000000);
+ nv_wr32(dev, 0x404714, 0x00000000);
+ nv_wr32(dev, 0x404718, 0x00000000);
+ nv_wr32(dev, 0x40471c, 0x00000000);
+ nv_wr32(dev, 0x404720, 0x00000000);
+ nv_wr32(dev, 0x404724, 0x00000000);
+ nv_wr32(dev, 0x404728, 0x00000000);
+ nv_wr32(dev, 0x40472c, 0x00000000);
+ nv_wr32(dev, 0x404730, 0x00000000);
+ nv_wr32(dev, 0x404734, 0x00000100);
+ nv_wr32(dev, 0x404738, 0x00000000);
+ nv_wr32(dev, 0x40473c, 0x00000000);
+ nv_wr32(dev, 0x404740, 0x00000000);
+ nv_wr32(dev, 0x404744, 0x00000000);
+ nv_wr32(dev, 0x404748, 0x00000000);
+ nv_wr32(dev, 0x40474c, 0x00000000);
+ nv_wr32(dev, 0x404750, 0x00000000);
+ nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ nv_wr32(dev, 0x405838, 0x00000000);
+ nv_wr32(dev, 0x405854, 0x00000000);
+ nv_wr32(dev, 0x405870, 0x00000001);
+ nv_wr32(dev, 0x405874, 0x00000001);
+ nv_wr32(dev, 0x405878, 0x00000001);
+ nv_wr32(dev, 0x40587c, 0x00000001);
+ nv_wr32(dev, 0x405a00, 0x00000000);
+ nv_wr32(dev, 0x405a04, 0x00000000);
+ nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x000103c1);
+ nv_wr32(dev, 0x406028, 0x00000001);
+ nv_wr32(dev, 0x40602c, 0x00000001);
+ nv_wr32(dev, 0x406030, 0x00000001);
+ nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x00000000);
+ nv_wr32(dev, 0x4064ac, 0x00003fff);
+ nv_wr32(dev, 0x4064b4, 0x00000000);
+ nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x00000023);
+ nv_wr32(dev, 0x40780c, 0x0a418820);
+ nv_wr32(dev, 0x407810, 0x062080e6);
+ nv_wr32(dev, 0x407814, 0x020398a4);
+ nv_wr32(dev, 0x407818, 0x0e629062);
+ nv_wr32(dev, 0x40781c, 0x0a418820);
+ nv_wr32(dev, 0x407820, 0x000000e6);
+ nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x00000000);
+ nv_wr32(dev, 0x408004, 0x00000000);
+ nv_wr32(dev, 0x408008, 0x00000018);
+ nv_wr32(dev, 0x40800c, 0x00000000);
+ nv_wr32(dev, 0x408010, 0x00000000);
+ nv_wr32(dev, 0x408014, 0x00000069);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* ROPC_BROADCAST */
+ nv_wr32(dev, 0x408800, 0x02802a3c);
+ nv_wr32(dev, 0x408804, 0x00000040);
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ nv_wr32(dev, 0x408900, 0x0080b801);
+ break;
+ case 0xc3:
+ case 0xc4:
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ break;
+ }
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ nv_wr32(dev, 0x40890c, 0x00000000);
+ nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+ int i;
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418380, 0x00000016);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x418408, 0x00000000);
+ nv_wr32(dev, 0x41840c, 0x00001008);
+ nv_wr32(dev, 0x418410, 0x0fff0fff);
+ nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418450, 0x00000000);
+ nv_wr32(dev, 0x418454, 0x00000000);
+ nv_wr32(dev, 0x418458, 0x00000000);
+ nv_wr32(dev, 0x41845c, 0x00000000);
+ nv_wr32(dev, 0x418460, 0x00000000);
+ nv_wr32(dev, 0x418464, 0x00000000);
+ nv_wr32(dev, 0x418468, 0x00000001);
+ nv_wr32(dev, 0x41846c, 0x00000000);
+ nv_wr32(dev, 0x418470, 0x00000000);
+ nv_wr32(dev, 0x418600, 0x0000001f);
+ nv_wr32(dev, 0x418684, 0x0000000f);
+ nv_wr32(dev, 0x418700, 0x00000002);
+ nv_wr32(dev, 0x418704, 0x00000080);
+ nv_wr32(dev, 0x418708, 0x00000000);
+ nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x418710, 0x00000000);
+ nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418808, 0x00000000);
+ nv_wr32(dev, 0x41880c, 0x00000000);
+ nv_wr32(dev, 0x418810, 0x00000000);
+ nv_wr32(dev, 0x418828, 0x00008442);
+ nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x4188d8, 0x00000008);
+ nv_wr32(dev, 0x4188e0, 0x01000000);
+ nv_wr32(dev, 0x4188e8, 0x00000000);
+ nv_wr32(dev, 0x4188ec, 0x00000000);
+ nv_wr32(dev, 0x4188f0, 0x00000000);
+ nv_wr32(dev, 0x4188f4, 0x00000000);
+ nv_wr32(dev, 0x4188f8, 0x00000000);
+ nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x41891c, 0x00ff00ff);
+ nv_wr32(dev, 0x418924, 0x00000000);
+ nv_wr32(dev, 0x418928, 0x00ffff00);
+ nv_wr32(dev, 0x41892c, 0x0000ff00);
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+ nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+ }
+ nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b08, 0x0a418820);
+ nv_wr32(dev, 0x418b0c, 0x062080e6);
+ nv_wr32(dev, 0x418b10, 0x020398a4);
+ nv_wr32(dev, 0x418b14, 0x0e629062);
+ nv_wr32(dev, 0x418b18, 0x0a418820);
+ nv_wr32(dev, 0x418b1c, 0x000000e6);
+ nv_wr32(dev, 0x418bb8, 0x00000103);
+ nv_wr32(dev, 0x418c08, 0x00000001);
+ nv_wr32(dev, 0x418c10, 0x00000000);
+ nv_wr32(dev, 0x418c14, 0x00000000);
+ nv_wr32(dev, 0x418c18, 0x00000000);
+ nv_wr32(dev, 0x418c1c, 0x00000000);
+ nv_wr32(dev, 0x418c20, 0x00000000);
+ nv_wr32(dev, 0x418c24, 0x00000000);
+ nv_wr32(dev, 0x418c28, 0x00000000);
+ nv_wr32(dev, 0x418c2c, 0x00000000);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x00000001);
+ nv_wr32(dev, 0x419000, 0x00000780);
+ nv_wr32(dev, 0x419004, 0x00000000);
+ nv_wr32(dev, 0x419008, 0x00000000);
+ nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419848, 0x00000000);
+ nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419888, 0x00000000);
+ nv_wr32(dev, 0x419a00, 0x000001f0);
+ nv_wr32(dev, 0x419a04, 0x00000001);
+ nv_wr32(dev, 0x419a08, 0x00000023);
+ nv_wr32(dev, 0x419a0c, 0x00020000);
+ nv_wr32(dev, 0x419a10, 0x00000000);
+ nv_wr32(dev, 0x419a14, 0x00000200);
+ nv_wr32(dev, 0x419a1c, 0x00000000);
+ nv_wr32(dev, 0x419a20, 0x00000800);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419ac4, 0x0007f440); /* 0xc3 */
+ nv_wr32(dev, 0x419b00, 0x0a418820);
+ nv_wr32(dev, 0x419b04, 0x062080e6);
+ nv_wr32(dev, 0x419b08, 0x020398a4);
+ nv_wr32(dev, 0x419b0c, 0x0e629062);
+ nv_wr32(dev, 0x419b10, 0x0a418820);
+ nv_wr32(dev, 0x419b14, 0x000000e6);
+ nv_wr32(dev, 0x419bd0, 0x00900103);
+ nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be4, 0x00000000);
+ nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c04, 0x00000006);
+ nv_wr32(dev, 0x419c08, 0x00000002);
+ nv_wr32(dev, 0x419c20, 0x00000000);
+ nv_wr32(dev, 0x419cbc, 0x28137606);
+ nv_wr32(dev, 0x419ce8, 0x00000000);
+ nv_wr32(dev, 0x419cf4, 0x00000183);
+ nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d24, 0x00001fff);
+ nv_wr32(dev, 0x419e04, 0x00000000);
+ nv_wr32(dev, 0x419e08, 0x00000000);
+ nv_wr32(dev, 0x419e0c, 0x00000000);
+ nv_wr32(dev, 0x419e10, 0x00000002);
+ nv_wr32(dev, 0x419e44, 0x001beff2);
+ nv_wr32(dev, 0x419e48, 0x00000000);
+ nv_wr32(dev, 0x419e4c, 0x0000000f);
+ nv_wr32(dev, 0x419e50, 0x00000000);
+ nv_wr32(dev, 0x419e54, 0x00000000);
+ nv_wr32(dev, 0x419e58, 0x00000000);
+ nv_wr32(dev, 0x419e5c, 0x00000000);
+ nv_wr32(dev, 0x419e60, 0x00000000);
+ nv_wr32(dev, 0x419e64, 0x00000000);
+ nv_wr32(dev, 0x419e68, 0x00000000);
+ nv_wr32(dev, 0x419e6c, 0x00000000);
+ nv_wr32(dev, 0x419e70, 0x00000000);
+ nv_wr32(dev, 0x419e74, 0x00000000);
+ nv_wr32(dev, 0x419e78, 0x00000000);
+ nv_wr32(dev, 0x419e7c, 0x00000000);
+ nv_wr32(dev, 0x419e80, 0x00000000);
+ nv_wr32(dev, 0x419e84, 0x00000000);
+ nv_wr32(dev, 0x419e88, 0x00000000);
+ nv_wr32(dev, 0x419e8c, 0x00000000);
+ nv_wr32(dev, 0x419e90, 0x00000000);
+ nv_wr32(dev, 0x419e98, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419ee0, 0x00011110);
+ nv_wr32(dev, 0x419f50, 0x00000000);
+ nv_wr32(dev, 0x419f54, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i, gpc, tp, id;
+ u32 r000260, tmp;
+
+ r000260 = nv_rd32(dev, 0x000260);
+ nv_wr32(dev, 0x000260, r000260 & ~1);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
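+ /* static mmio state for each PGRAPH unit */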
+ nvc0_grctx_generate_dispatch(dev);
+ nvc0_grctx_generate_macro(dev);
+ nvc0_grctx_generate_m2mf(dev);
+ nvc0_grctx_generate_unk47xx(dev);
+ nvc0_grctx_generate_shaders(dev);
+ nvc0_grctx_generate_unk60xx(dev);
+ nvc0_grctx_generate_unk64xx(dev);
+ nvc0_grctx_generate_tpbus(dev);
+ nvc0_grctx_generate_ccache(dev);
+ nvc0_grctx_generate_rop(dev);
+ nvc0_grctx_generate_gpc(dev);
+ nvc0_grctx_generate_tp(dev);
+
+ nv_wr32(dev, 0x404154, 0x00000000);
+
+ /* fuc "mmio list" writes */
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+ }
+
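+ /* assign a unique sequential id to each TP present */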
+ for (tp = 0, id = 0; tp < 4; tp++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tp < priv->tp_nr[gpc]) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+ }
+ }
+
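+ /* pack per-GPC TP counts, 4 bits per GPC */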
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tp_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x00000000);
+ nv_wr32(dev, 0x405874, 0x00000000);
+ nv_wr32(dev, 0x406030, 0x00000000);
+ nv_wr32(dev, 0x405878, 0x00000000);
+ nv_wr32(dev, 0x406034, 0x00000000);
+ nv_wr32(dev, 0x40587c, 0x00000000);
+
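+ /* distribute TPs round-robin across GPCs and upload the table */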
+ if (1) {
+ const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+ u8 tpnr[GPC_MAX];
+ u8 data[32];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ memset(data, 0x1f, sizeof(data));
+
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+ data[tp] = gpc;
+ }
+
+ for (i = 0; i < max / 4; i++)
+ nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ }
+
+ if (1) {
+ u32 data[6] = {}, data2[2] = {};
+ u8 tpnr[GPC_MAX];
+ u8 shift, ntpcv;
+
+ /* calculate first set of magics */
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+
+ data[tp / 6] |= gpc << ((tp % 6) * 5);
+ }
+
+ for (; tp < 32; tp++)
+ data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tp_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = (ntpcv << 16);
+ data2[0] |= (shift << 21);
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr |
+ data2[0]);
+ nv_wr32(dev, 0x419be4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+ /* UNK78xx */
+ nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+ }
+
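+ /* fill the 32 slots at 0x406800/0x406c00 with a growing TP
+  * mask and its complement within tp_mask
+  */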
+ if (1) {
+ u32 tp_mask = 0, tp_set = 0;
+ u8 tpnr[GPC_MAX];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1; i < 32; i++) {
+ int ltp = i * (priv->tp_total - 1) / 32;
+
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ tp_set |= 1 << ((gpc * 8) + tp);
+
+ do {
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ } while (ltp == (++i * (priv->tp_total - 1) / 32));
+ i--;
+ }
+ }
+
+ nv_wr32(dev, 0x400208, 0x80000000);
+
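+ /* default method state; presumably consumed through the icmd
+  * interface armed by the 0x400208 write above
+  */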
+ nv_icmd(dev, 0x00001000, 0x00000004);
+ nv_icmd(dev, 0x000000a9, 0x0000ffff);
+ nv_icmd(dev, 0x00000038, 0x0fac6881);
+ nv_icmd(dev, 0x0000003d, 0x00000001);
+ nv_icmd(dev, 0x000000e8, 0x00000400);
+ nv_icmd(dev, 0x000000e9, 0x00000400);
+ nv_icmd(dev, 0x000000ea, 0x00000400);
+ nv_icmd(dev, 0x000000eb, 0x00000400);
+ nv_icmd(dev, 0x000000ec, 0x00000400);
+ nv_icmd(dev, 0x000000ed, 0x00000400);
+ nv_icmd(dev, 0x000000ee, 0x00000400);
+ nv_icmd(dev, 0x000000ef, 0x00000400);
+ nv_icmd(dev, 0x00000078, 0x00000300);
+ nv_icmd(dev, 0x00000079, 0x00000300);
+ nv_icmd(dev, 0x0000007a, 0x00000300);
+ nv_icmd(dev, 0x0000007b, 0x00000300);
+ nv_icmd(dev, 0x0000007c, 0x00000300);
+ nv_icmd(dev, 0x0000007d, 0x00000300);
+ nv_icmd(dev, 0x0000007e, 0x00000300);
+ nv_icmd(dev, 0x0000007f, 0x00000300);
+ nv_icmd(dev, 0x00000050, 0x00000011);
+ nv_icmd(dev, 0x00000058, 0x00000008);
+ nv_icmd(dev, 0x00000059, 0x00000008);
+ nv_icmd(dev, 0x0000005a, 0x00000008);
+ nv_icmd(dev, 0x0000005b, 0x00000008);
+ nv_icmd(dev, 0x0000005c, 0x00000008);
+ nv_icmd(dev, 0x0000005d, 0x00000008);
+ nv_icmd(dev, 0x0000005e, 0x00000008);
+ nv_icmd(dev, 0x0000005f, 0x00000008);
+ nv_icmd(dev, 0x00000208, 0x00000001);
+ nv_icmd(dev, 0x00000209, 0x00000001);
+ nv_icmd(dev, 0x0000020a, 0x00000001);
+ nv_icmd(dev, 0x0000020b, 0x00000001);
+ nv_icmd(dev, 0x0000020c, 0x00000001);
+ nv_icmd(dev, 0x0000020d, 0x00000001);
+ nv_icmd(dev, 0x0000020e, 0x00000001);
+ nv_icmd(dev, 0x0000020f, 0x00000001);
+ nv_icmd(dev, 0x00000081, 0x00000001);
+ nv_icmd(dev, 0x00000085, 0x00000004);
+ nv_icmd(dev, 0x00000088, 0x00000400);
+ nv_icmd(dev, 0x00000090, 0x00000300);
+ nv_icmd(dev, 0x00000098, 0x00001001);
+ nv_icmd(dev, 0x000000e3, 0x00000001);
+ nv_icmd(dev, 0x000000da, 0x00000001);
+ nv_icmd(dev, 0x000000f8, 0x00000003);
+ nv_icmd(dev, 0x000000fa, 0x00000001);
+ nv_icmd(dev, 0x0000009f, 0x0000ffff);
+ nv_icmd(dev, 0x000000a0, 0x0000ffff);
+ nv_icmd(dev, 0x000000a1, 0x0000ffff);
+ nv_icmd(dev, 0x000000a2, 0x0000ffff);
+ nv_icmd(dev, 0x000000b1, 0x00000001);
+ nv_icmd(dev, 0x000000b2, 0x00000000);
+ nv_icmd(dev, 0x000000b3, 0x00000000);
+ nv_icmd(dev, 0x000000b4, 0x00000000);
+ nv_icmd(dev, 0x000000b5, 0x00000000);
+ nv_icmd(dev, 0x000000b6, 0x00000000);
+ nv_icmd(dev, 0x000000b7, 0x00000000);
+ nv_icmd(dev, 0x000000b8, 0x00000000);
+ nv_icmd(dev, 0x000000b9, 0x00000000);
+ nv_icmd(dev, 0x000000ba, 0x00000000);
+ nv_icmd(dev, 0x000000bb, 0x00000000);
+ nv_icmd(dev, 0x000000bc, 0x00000000);
+ nv_icmd(dev, 0x000000bd, 0x00000000);
+ nv_icmd(dev, 0x000000be, 0x00000000);
+ nv_icmd(dev, 0x000000bf, 0x00000000);
+ nv_icmd(dev, 0x000000c0, 0x00000000);
+ nv_icmd(dev, 0x000000c1, 0x00000000);
+ nv_icmd(dev, 0x000000c2, 0x00000000);
+ nv_icmd(dev, 0x000000c3, 0x00000000);
+ nv_icmd(dev, 0x000000c4, 0x00000000);
+ nv_icmd(dev, 0x000000c5, 0x00000000);
+ nv_icmd(dev, 0x000000c6, 0x00000000);
+ nv_icmd(dev, 0x000000c7, 0x00000000);
+ nv_icmd(dev, 0x000000c8, 0x00000000);
+ nv_icmd(dev, 0x000000c9, 0x00000000);
+ nv_icmd(dev, 0x000000ca, 0x00000000);
+ nv_icmd(dev, 0x000000cb, 0x00000000);
+ nv_icmd(dev, 0x000000cc, 0x00000000);
+ nv_icmd(dev, 0x000000cd, 0x00000000);
+ nv_icmd(dev, 0x000000ce, 0x00000000);
+ nv_icmd(dev, 0x000000cf, 0x00000000);
+ nv_icmd(dev, 0x000000d0, 0x00000000);
+ nv_icmd(dev, 0x000000d1, 0x00000000);
+ nv_icmd(dev, 0x000000d2, 0x00000000);
+ nv_icmd(dev, 0x000000d3, 0x00000000);
+ nv_icmd(dev, 0x000000d4, 0x00000000);
+ nv_icmd(dev, 0x000000d5, 0x00000000);
+ nv_icmd(dev, 0x000000d6, 0x00000000);
+ nv_icmd(dev, 0x000000d7, 0x00000000);
+ nv_icmd(dev, 0x000000d8, 0x00000000);
+ nv_icmd(dev, 0x000000d9, 0x00000000);
+ nv_icmd(dev, 0x00000210, 0x00000040);
+ nv_icmd(dev, 0x00000211, 0x00000040);
+ nv_icmd(dev, 0x00000212, 0x00000040);
+ nv_icmd(dev, 0x00000213, 0x00000040);
+ nv_icmd(dev, 0x00000214, 0x00000040);
+ nv_icmd(dev, 0x00000215, 0x00000040);
+ nv_icmd(dev, 0x00000216, 0x00000040);
+ nv_icmd(dev, 0x00000217, 0x00000040);
+ nv_icmd(dev, 0x00000218, 0x0000c080);
+ nv_icmd(dev, 0x00000219, 0x0000c080);
+ nv_icmd(dev, 0x0000021a, 0x0000c080);
+ nv_icmd(dev, 0x0000021b, 0x0000c080);
+ nv_icmd(dev, 0x0000021c, 0x0000c080);
+ nv_icmd(dev, 0x0000021d, 0x0000c080);
+ nv_icmd(dev, 0x0000021e, 0x0000c080);
+ nv_icmd(dev, 0x0000021f, 0x0000c080);
+ nv_icmd(dev, 0x000000ad, 0x0000013e);
+ nv_icmd(dev, 0x000000e1, 0x00000010);
+ nv_icmd(dev, 0x00000290, 0x00000000);
+ nv_icmd(dev, 0x00000291, 0x00000000);
+ nv_icmd(dev, 0x00000292, 0x00000000);
+ nv_icmd(dev, 0x00000293, 0x00000000);
+ nv_icmd(dev, 0x00000294, 0x00000000);
+ nv_icmd(dev, 0x00000295, 0x00000000);
+ nv_icmd(dev, 0x00000296, 0x00000000);
+ nv_icmd(dev, 0x00000297, 0x00000000);
+ nv_icmd(dev, 0x00000298, 0x00000000);
+ nv_icmd(dev, 0x00000299, 0x00000000);
+ nv_icmd(dev, 0x0000029a, 0x00000000);
+ nv_icmd(dev, 0x0000029b, 0x00000000);
+ nv_icmd(dev, 0x0000029c, 0x00000000);
+ nv_icmd(dev, 0x0000029d, 0x00000000);
+ nv_icmd(dev, 0x0000029e, 0x00000000);
+ nv_icmd(dev, 0x0000029f, 0x00000000);
+ nv_icmd(dev, 0x000003b0, 0x00000000);
+ nv_icmd(dev, 0x000003b1, 0x00000000);
+ nv_icmd(dev, 0x000003b2, 0x00000000);
+ nv_icmd(dev, 0x000003b3, 0x00000000);
+ nv_icmd(dev, 0x000003b4, 0x00000000);
+ nv_icmd(dev, 0x000003b5, 0x00000000);
+ nv_icmd(dev, 0x000003b6, 0x00000000);
+ nv_icmd(dev, 0x000003b7, 0x00000000);
+ nv_icmd(dev, 0x000003b8, 0x00000000);
+ nv_icmd(dev, 0x000003b9, 0x00000000);
+ nv_icmd(dev, 0x000003ba, 0x00000000);
+ nv_icmd(dev, 0x000003bb, 0x00000000);
+ nv_icmd(dev, 0x000003bc, 0x00000000);
+ nv_icmd(dev, 0x000003bd, 0x00000000);
+ nv_icmd(dev, 0x000003be, 0x00000000);
+ nv_icmd(dev, 0x000003bf, 0x00000000);
+ nv_icmd(dev, 0x000002a0, 0x00000000);
+ nv_icmd(dev, 0x000002a1, 0x00000000);
+ nv_icmd(dev, 0x000002a2, 0x00000000);
+ nv_icmd(dev, 0x000002a3, 0x00000000);
+ nv_icmd(dev, 0x000002a4, 0x00000000);
+ nv_icmd(dev, 0x000002a5, 0x00000000);
+ nv_icmd(dev, 0x000002a6, 0x00000000);
+ nv_icmd(dev, 0x000002a7, 0x00000000);
+ nv_icmd(dev, 0x000002a8, 0x00000000);
+ nv_icmd(dev, 0x000002a9, 0x00000000);
+ nv_icmd(dev, 0x000002aa, 0x00000000);
+ nv_icmd(dev, 0x000002ab, 0x00000000);
+ nv_icmd(dev, 0x000002ac, 0x00000000);
+ nv_icmd(dev, 0x000002ad, 0x00000000);
+ nv_icmd(dev, 0x000002ae, 0x00000000);
+ nv_icmd(dev, 0x000002af, 0x00000000);
+ nv_icmd(dev, 0x00000420, 0x00000000);
+ nv_icmd(dev, 0x00000421, 0x00000000);
+ nv_icmd(dev, 0x00000422, 0x00000000);
+ nv_icmd(dev, 0x00000423, 0x00000000);
+ nv_icmd(dev, 0x00000424, 0x00000000);
+ nv_icmd(dev, 0x00000425, 0x00000000);
+ nv_icmd(dev, 0x00000426, 0x00000000);
+ nv_icmd(dev, 0x00000427, 0x00000000);
+ nv_icmd(dev, 0x00000428, 0x00000000);
+ nv_icmd(dev, 0x00000429, 0x00000000);
+ nv_icmd(dev, 0x0000042a, 0x00000000);
+ nv_icmd(dev, 0x0000042b, 0x00000000);
+ nv_icmd(dev, 0x0000042c, 0x00000000);
+ nv_icmd(dev, 0x0000042d, 0x00000000);
+ nv_icmd(dev, 0x0000042e, 0x00000000);
+ nv_icmd(dev, 0x0000042f, 0x00000000);
+ nv_icmd(dev, 0x000002b0, 0x00000000);
+ nv_icmd(dev, 0x000002b1, 0x00000000);
+ nv_icmd(dev, 0x000002b2, 0x00000000);
+ nv_icmd(dev, 0x000002b3, 0x00000000);
+ nv_icmd(dev, 0x000002b4, 0x00000000);
+ nv_icmd(dev, 0x000002b5, 0x00000000);
+ nv_icmd(dev, 0x000002b6, 0x00000000);
+ nv_icmd(dev, 0x000002b7, 0x00000000);
+ nv_icmd(dev, 0x000002b8, 0x00000000);
+ nv_icmd(dev, 0x000002b9, 0x00000000);
+ nv_icmd(dev, 0x000002ba, 0x00000000);
+ nv_icmd(dev, 0x000002bb, 0x00000000);
+ nv_icmd(dev, 0x000002bc, 0x00000000);
+ nv_icmd(dev, 0x000002bd, 0x00000000);
+ nv_icmd(dev, 0x000002be, 0x00000000);
+ nv_icmd(dev, 0x000002bf, 0x00000000);
+ nv_icmd(dev, 0x00000430, 0x00000000);
+ nv_icmd(dev, 0x00000431, 0x00000000);
+ nv_icmd(dev, 0x00000432, 0x00000000);
+ nv_icmd(dev, 0x00000433, 0x00000000);
+ nv_icmd(dev, 0x00000434, 0x00000000);
+ nv_icmd(dev, 0x00000435, 0x00000000);
+ nv_icmd(dev, 0x00000436, 0x00000000);
+ nv_icmd(dev, 0x00000437, 0x00000000);
+ nv_icmd(dev, 0x00000438, 0x00000000);
+ nv_icmd(dev, 0x00000439, 0x00000000);
+ nv_icmd(dev, 0x0000043a, 0x00000000);
+ nv_icmd(dev, 0x0000043b, 0x00000000);
+ nv_icmd(dev, 0x0000043c, 0x00000000);
+ nv_icmd(dev, 0x0000043d, 0x00000000);
+ nv_icmd(dev, 0x0000043e, 0x00000000);
+ nv_icmd(dev, 0x0000043f, 0x00000000);
+ nv_icmd(dev, 0x000002c0, 0x00000000);
+ nv_icmd(dev, 0x000002c1, 0x00000000);
+ nv_icmd(dev, 0x000002c2, 0x00000000);
+ nv_icmd(dev, 0x000002c3, 0x00000000);
+ nv_icmd(dev, 0x000002c4, 0x00000000);
+ nv_icmd(dev, 0x000002c5, 0x00000000);
+ nv_icmd(dev, 0x000002c6, 0x00000000);
+ nv_icmd(dev, 0x000002c7, 0x00000000);
+ nv_icmd(dev, 0x000002c8, 0x00000000);
+ nv_icmd(dev, 0x000002c9, 0x00000000);
+ nv_icmd(dev, 0x000002ca, 0x00000000);
+ nv_icmd(dev, 0x000002cb, 0x00000000);
+ nv_icmd(dev, 0x000002cc, 0x00000000);
+ nv_icmd(dev, 0x000002cd, 0x00000000);
+ nv_icmd(dev, 0x000002ce, 0x00000000);
+ nv_icmd(dev, 0x000002cf, 0x00000000);
+ nv_icmd(dev, 0x000004d0, 0x00000000);
+ nv_icmd(dev, 0x000004d1, 0x00000000);
+ nv_icmd(dev, 0x000004d2, 0x00000000);
+ nv_icmd(dev, 0x000004d3, 0x00000000);
+ nv_icmd(dev, 0x000004d4, 0x00000000);
+ nv_icmd(dev, 0x000004d5, 0x00000000);
+ nv_icmd(dev, 0x000004d6, 0x00000000);
+ nv_icmd(dev, 0x000004d7, 0x00000000);
+ nv_icmd(dev, 0x000004d8, 0x00000000);
+ nv_icmd(dev, 0x000004d9, 0x00000000);
+ nv_icmd(dev, 0x000004da, 0x00000000);
+ nv_icmd(dev, 0x000004db, 0x00000000);
+ nv_icmd(dev, 0x000004dc, 0x00000000);
+ nv_icmd(dev, 0x000004dd, 0x00000000);
+ nv_icmd(dev, 0x000004de, 0x00000000);
+ nv_icmd(dev, 0x000004df, 0x00000000);
+ nv_icmd(dev, 0x00000720, 0x00000000);
+ nv_icmd(dev, 0x00000721, 0x00000000);
+ nv_icmd(dev, 0x00000722, 0x00000000);
+ nv_icmd(dev, 0x00000723, 0x00000000);
+ nv_icmd(dev, 0x00000724, 0x00000000);
+ nv_icmd(dev, 0x00000725, 0x00000000);
+ nv_icmd(dev, 0x00000726, 0x00000000);
+ nv_icmd(dev, 0x00000727, 0x00000000);
+ nv_icmd(dev, 0x00000728, 0x00000000);
+ nv_icmd(dev, 0x00000729, 0x00000000);
+ nv_icmd(dev, 0x0000072a, 0x00000000);
+ nv_icmd(dev, 0x0000072b, 0x00000000);
+ nv_icmd(dev, 0x0000072c, 0x00000000);
+ nv_icmd(dev, 0x0000072d, 0x00000000);
+ nv_icmd(dev, 0x0000072e, 0x00000000);
+ nv_icmd(dev, 0x0000072f, 0x00000000);
+ nv_icmd(dev, 0x000008c0, 0x00000000);
+ nv_icmd(dev, 0x000008c1, 0x00000000);
+ nv_icmd(dev, 0x000008c2, 0x00000000);
+ nv_icmd(dev, 0x000008c3, 0x00000000);
+ nv_icmd(dev, 0x000008c4, 0x00000000);
+ nv_icmd(dev, 0x000008c5, 0x00000000);
+ nv_icmd(dev, 0x000008c6, 0x00000000);
+ nv_icmd(dev, 0x000008c7, 0x00000000);
+ nv_icmd(dev, 0x000008c8, 0x00000000);
+ nv_icmd(dev, 0x000008c9, 0x00000000);
+ nv_icmd(dev, 0x000008ca, 0x00000000);
+ nv_icmd(dev, 0x000008cb, 0x00000000);
+ nv_icmd(dev, 0x000008cc, 0x00000000);
+ nv_icmd(dev, 0x000008cd, 0x00000000);
+ nv_icmd(dev, 0x000008ce, 0x00000000);
+ nv_icmd(dev, 0x000008cf, 0x00000000);
+ nv_icmd(dev, 0x00000890, 0x00000000);
+ nv_icmd(dev, 0x00000891, 0x00000000);
+ nv_icmd(dev, 0x00000892, 0x00000000);
+ nv_icmd(dev, 0x00000893, 0x00000000);
+ nv_icmd(dev, 0x00000894, 0x00000000);
+ nv_icmd(dev, 0x00000895, 0x00000000);
+ nv_icmd(dev, 0x00000896, 0x00000000);
+ nv_icmd(dev, 0x00000897, 0x00000000);
+ nv_icmd(dev, 0x00000898, 0x00000000);
+ nv_icmd(dev, 0x00000899, 0x00000000);
+ nv_icmd(dev, 0x0000089a, 0x00000000);
+ nv_icmd(dev, 0x0000089b, 0x00000000);
+ nv_icmd(dev, 0x0000089c, 0x00000000);
+ nv_icmd(dev, 0x0000089d, 0x00000000);
+ nv_icmd(dev, 0x0000089e, 0x00000000);
+ nv_icmd(dev, 0x0000089f, 0x00000000);
+ nv_icmd(dev, 0x000008e0, 0x00000000);
+ nv_icmd(dev, 0x000008e1, 0x00000000);
+ nv_icmd(dev, 0x000008e2, 0x00000000);
+ nv_icmd(dev, 0x000008e3, 0x00000000);
+ nv_icmd(dev, 0x000008e4, 0x00000000);
+ nv_icmd(dev, 0x000008e5, 0x00000000);
+ nv_icmd(dev, 0x000008e6, 0x00000000);
+ nv_icmd(dev, 0x000008e7, 0x00000000);
+ nv_icmd(dev, 0x000008e8, 0x00000000);
+ nv_icmd(dev, 0x000008e9, 0x00000000);
+ nv_icmd(dev, 0x000008ea, 0x00000000);
+ nv_icmd(dev, 0x000008eb, 0x00000000);
+ nv_icmd(dev, 0x000008ec, 0x00000000);
+ nv_icmd(dev, 0x000008ed, 0x00000000);
+ nv_icmd(dev, 0x000008ee, 0x00000000);
+ nv_icmd(dev, 0x000008ef, 0x00000000);
+ nv_icmd(dev, 0x000008a0, 0x00000000);
+ nv_icmd(dev, 0x000008a1, 0x00000000);
+ nv_icmd(dev, 0x000008a2, 0x00000000);
+ nv_icmd(dev, 0x000008a3, 0x00000000);
+ nv_icmd(dev, 0x000008a4, 0x00000000);
+ nv_icmd(dev, 0x000008a5, 0x00000000);
+ nv_icmd(dev, 0x000008a6, 0x00000000);
+ nv_icmd(dev, 0x000008a7, 0x00000000);
+ nv_icmd(dev, 0x000008a8, 0x00000000);
+ nv_icmd(dev, 0x000008a9, 0x00000000);
+ nv_icmd(dev, 0x000008aa, 0x00000000);
+ nv_icmd(dev, 0x000008ab, 0x00000000);
+ nv_icmd(dev, 0x000008ac, 0x00000000);
+ nv_icmd(dev, 0x000008ad, 0x00000000);
+ nv_icmd(dev, 0x000008ae, 0x00000000);
+ nv_icmd(dev, 0x000008af, 0x00000000);
+ nv_icmd(dev, 0x000008f0, 0x00000000);
+ nv_icmd(dev, 0x000008f1, 0x00000000);
+ nv_icmd(dev, 0x000008f2, 0x00000000);
+ nv_icmd(dev, 0x000008f3, 0x00000000);
+ nv_icmd(dev, 0x000008f4, 0x00000000);
+ nv_icmd(dev, 0x000008f5, 0x00000000);
+ nv_icmd(dev, 0x000008f6, 0x00000000);
+ nv_icmd(dev, 0x000008f7, 0x00000000);
+ nv_icmd(dev, 0x000008f8, 0x00000000);
+ nv_icmd(dev, 0x000008f9, 0x00000000);
+ nv_icmd(dev, 0x000008fa, 0x00000000);
+ nv_icmd(dev, 0x000008fb, 0x00000000);
+ nv_icmd(dev, 0x000008fc, 0x00000000);
+ nv_icmd(dev, 0x000008fd, 0x00000000);
+ nv_icmd(dev, 0x000008fe, 0x00000000);
+ nv_icmd(dev, 0x000008ff, 0x00000000);
+ nv_icmd(dev, 0x0000094c, 0x000000ff);
+ nv_icmd(dev, 0x0000094d, 0xffffffff);
+ nv_icmd(dev, 0x0000094e, 0x00000002);
+ nv_icmd(dev, 0x000002ec, 0x00000001);
+ nv_icmd(dev, 0x00000303, 0x00000001);
+ nv_icmd(dev, 0x000002e6, 0x00000001);
+ nv_icmd(dev, 0x00000466, 0x00000052);
+ nv_icmd(dev, 0x00000301, 0x3f800000);
+ nv_icmd(dev, 0x00000304, 0x30201000);
+ nv_icmd(dev, 0x00000305, 0x70605040);
+ nv_icmd(dev, 0x00000306, 0xb8a89888);
+ nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x0000030a, 0x00ffff00);
+ nv_icmd(dev, 0x0000030b, 0x0000001a);
+ nv_icmd(dev, 0x0000030c, 0x00000001);
+ nv_icmd(dev, 0x00000318, 0x00000001);
+ nv_icmd(dev, 0x00000340, 0x00000000);
+ nv_icmd(dev, 0x00000375, 0x00000001);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x0000037d, 0x00000006);
+ nv_icmd(dev, 0x000003a0, 0x00000002);
+ nv_icmd(dev, 0x000003aa, 0x00000001);
+ nv_icmd(dev, 0x000003a9, 0x00000001);
+ nv_icmd(dev, 0x00000380, 0x00000001);
+ nv_icmd(dev, 0x00000360, 0x00000040);
+ nv_icmd(dev, 0x00000366, 0x00000000);
+ nv_icmd(dev, 0x00000367, 0x00000000);
+ nv_icmd(dev, 0x00000368, 0x00001fff);
+ nv_icmd(dev, 0x00000370, 0x00000000);
+ nv_icmd(dev, 0x00000371, 0x00000000);
+ nv_icmd(dev, 0x00000372, 0x003fffff);
+ nv_icmd(dev, 0x0000037a, 0x00000012);
+ nv_icmd(dev, 0x000005e0, 0x00000022);
+ nv_icmd(dev, 0x000005e1, 0x00000022);
+ nv_icmd(dev, 0x000005e2, 0x00000022);
+ nv_icmd(dev, 0x000005e3, 0x00000022);
+ nv_icmd(dev, 0x000005e4, 0x00000022);
+ nv_icmd(dev, 0x00000619, 0x00000003);
+ nv_icmd(dev, 0x00000811, 0x00000003);
+ nv_icmd(dev, 0x00000812, 0x00000004);
+ nv_icmd(dev, 0x00000813, 0x00000006);
+ nv_icmd(dev, 0x00000814, 0x00000008);
+ nv_icmd(dev, 0x00000815, 0x0000000b);
+ nv_icmd(dev, 0x00000800, 0x00000001);
+ nv_icmd(dev, 0x00000801, 0x00000001);
+ nv_icmd(dev, 0x00000802, 0x00000001);
+ nv_icmd(dev, 0x00000803, 0x00000001);
+ nv_icmd(dev, 0x00000804, 0x00000001);
+ nv_icmd(dev, 0x00000805, 0x00000001);
+ nv_icmd(dev, 0x00000632, 0x00000001);
+ nv_icmd(dev, 0x00000633, 0x00000002);
+ nv_icmd(dev, 0x00000634, 0x00000003);
+ nv_icmd(dev, 0x00000635, 0x00000004);
+ nv_icmd(dev, 0x00000654, 0x3f800000);
+ nv_icmd(dev, 0x00000657, 0x3f800000);
+ nv_icmd(dev, 0x00000655, 0x3f800000);
+ nv_icmd(dev, 0x00000656, 0x3f800000);
+ nv_icmd(dev, 0x000006cd, 0x3f800000);
+ nv_icmd(dev, 0x000007f5, 0x3f800000);
+ nv_icmd(dev, 0x000007dc, 0x39291909);
+ nv_icmd(dev, 0x000007dd, 0x79695949);
+ nv_icmd(dev, 0x000007de, 0xb9a99989);
+ nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007e8, 0x00003210);
+ nv_icmd(dev, 0x000007e9, 0x00007654);
+ nv_icmd(dev, 0x000007ea, 0x00000098);
+ nv_icmd(dev, 0x000007ec, 0x39291909);
+ nv_icmd(dev, 0x000007ed, 0x79695949);
+ nv_icmd(dev, 0x000007ee, 0xb9a99989);
+ nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007f0, 0x00003210);
+ nv_icmd(dev, 0x000007f1, 0x00007654);
+ nv_icmd(dev, 0x000007f2, 0x00000098);
+ nv_icmd(dev, 0x000005a5, 0x00000001);
+ nv_icmd(dev, 0x00000980, 0x00000000);
+ nv_icmd(dev, 0x00000981, 0x00000000);
+ nv_icmd(dev, 0x00000982, 0x00000000);
+ nv_icmd(dev, 0x00000983, 0x00000000);
+ nv_icmd(dev, 0x00000984, 0x00000000);
+ nv_icmd(dev, 0x00000985, 0x00000000);
+ nv_icmd(dev, 0x00000986, 0x00000000);
+ nv_icmd(dev, 0x00000987, 0x00000000);
+ nv_icmd(dev, 0x00000988, 0x00000000);
+ nv_icmd(dev, 0x00000989, 0x00000000);
+ nv_icmd(dev, 0x0000098a, 0x00000000);
+ nv_icmd(dev, 0x0000098b, 0x00000000);
+ nv_icmd(dev, 0x0000098c, 0x00000000);
+ nv_icmd(dev, 0x0000098d, 0x00000000);
+ nv_icmd(dev, 0x0000098e, 0x00000000);
+ nv_icmd(dev, 0x0000098f, 0x00000000);
+ nv_icmd(dev, 0x00000990, 0x00000000);
+ nv_icmd(dev, 0x00000991, 0x00000000);
+ nv_icmd(dev, 0x00000992, 0x00000000);
+ nv_icmd(dev, 0x00000993, 0x00000000);
+ nv_icmd(dev, 0x00000994, 0x00000000);
+ nv_icmd(dev, 0x00000995, 0x00000000);
+ nv_icmd(dev, 0x00000996, 0x00000000);
+ nv_icmd(dev, 0x00000997, 0x00000000);
+ nv_icmd(dev, 0x00000998, 0x00000000);
+ nv_icmd(dev, 0x00000999, 0x00000000);
+ nv_icmd(dev, 0x0000099a, 0x00000000);
+ nv_icmd(dev, 0x0000099b, 0x00000000);
+ nv_icmd(dev, 0x0000099c, 0x00000000);
+ nv_icmd(dev, 0x0000099d, 0x00000000);
+ nv_icmd(dev, 0x0000099e, 0x00000000);
+ nv_icmd(dev, 0x0000099f, 0x00000000);
+ nv_icmd(dev, 0x000009a0, 0x00000000);
+ nv_icmd(dev, 0x000009a1, 0x00000000);
+ nv_icmd(dev, 0x000009a2, 0x00000000);
+ nv_icmd(dev, 0x000009a3, 0x00000000);
+ nv_icmd(dev, 0x000009a4, 0x00000000);
+ nv_icmd(dev, 0x000009a5, 0x00000000);
+ nv_icmd(dev, 0x000009a6, 0x00000000);
+ nv_icmd(dev, 0x000009a7, 0x00000000);
+ nv_icmd(dev, 0x000009a8, 0x00000000);
+ nv_icmd(dev, 0x000009a9, 0x00000000);
+ nv_icmd(dev, 0x000009aa, 0x00000000);
+ nv_icmd(dev, 0x000009ab, 0x00000000);
+ nv_icmd(dev, 0x000009ac, 0x00000000);
+ nv_icmd(dev, 0x000009ad, 0x00000000);
+ nv_icmd(dev, 0x000009ae, 0x00000000);
+ nv_icmd(dev, 0x000009af, 0x00000000);
+ nv_icmd(dev, 0x000009b0, 0x00000000);
+ nv_icmd(dev, 0x000009b1, 0x00000000);
+ nv_icmd(dev, 0x000009b2, 0x00000000);
+ nv_icmd(dev, 0x000009b3, 0x00000000);
+ nv_icmd(dev, 0x000009b4, 0x00000000);
+ nv_icmd(dev, 0x000009b5, 0x00000000);
+ nv_icmd(dev, 0x000009b6, 0x00000000);
+ nv_icmd(dev, 0x000009b7, 0x00000000);
+ nv_icmd(dev, 0x000009b8, 0x00000000);
+ nv_icmd(dev, 0x000009b9, 0x00000000);
+ nv_icmd(dev, 0x000009ba, 0x00000000);
+ nv_icmd(dev, 0x000009bb, 0x00000000);
+ nv_icmd(dev, 0x000009bc, 0x00000000);
+ nv_icmd(dev, 0x000009bd, 0x00000000);
+ nv_icmd(dev, 0x000009be, 0x00000000);
+ nv_icmd(dev, 0x000009bf, 0x00000000);
+ nv_icmd(dev, 0x000009c0, 0x00000000);
+ nv_icmd(dev, 0x000009c1, 0x00000000);
+ nv_icmd(dev, 0x000009c2, 0x00000000);
+ nv_icmd(dev, 0x000009c3, 0x00000000);
+ nv_icmd(dev, 0x000009c4, 0x00000000);
+ nv_icmd(dev, 0x000009c5, 0x00000000);
+ nv_icmd(dev, 0x000009c6, 0x00000000);
+ nv_icmd(dev, 0x000009c7, 0x00000000);
+ nv_icmd(dev, 0x000009c8, 0x00000000);
+ nv_icmd(dev, 0x000009c9, 0x00000000);
+ nv_icmd(dev, 0x000009ca, 0x00000000);
+ nv_icmd(dev, 0x000009cb, 0x00000000);
+ nv_icmd(dev, 0x000009cc, 0x00000000);
+ nv_icmd(dev, 0x000009cd, 0x00000000);
+ nv_icmd(dev, 0x000009ce, 0x00000000);
+ nv_icmd(dev, 0x000009cf, 0x00000000);
+ nv_icmd(dev, 0x000009d0, 0x00000000);
+ nv_icmd(dev, 0x000009d1, 0x00000000);
+ nv_icmd(dev, 0x000009d2, 0x00000000);
+ nv_icmd(dev, 0x000009d3, 0x00000000);
+ nv_icmd(dev, 0x000009d4, 0x00000000);
+ nv_icmd(dev, 0x000009d5, 0x00000000);
+ nv_icmd(dev, 0x000009d6, 0x00000000);
+ nv_icmd(dev, 0x000009d7, 0x00000000);
+ nv_icmd(dev, 0x000009d8, 0x00000000);
+ nv_icmd(dev, 0x000009d9, 0x00000000);
+ nv_icmd(dev, 0x000009da, 0x00000000);
+ nv_icmd(dev, 0x000009db, 0x00000000);
+ nv_icmd(dev, 0x000009dc, 0x00000000);
+ nv_icmd(dev, 0x000009dd, 0x00000000);
+ nv_icmd(dev, 0x000009de, 0x00000000);
+ nv_icmd(dev, 0x000009df, 0x00000000);
+ nv_icmd(dev, 0x000009e0, 0x00000000);
+ nv_icmd(dev, 0x000009e1, 0x00000000);
+ nv_icmd(dev, 0x000009e2, 0x00000000);
+ nv_icmd(dev, 0x000009e3, 0x00000000);
+ nv_icmd(dev, 0x000009e4, 0x00000000);
+ nv_icmd(dev, 0x000009e5, 0x00000000);
+ nv_icmd(dev, 0x000009e6, 0x00000000);
+ nv_icmd(dev, 0x000009e7, 0x00000000);
+ nv_icmd(dev, 0x000009e8, 0x00000000);
+ nv_icmd(dev, 0x000009e9, 0x00000000);
+ nv_icmd(dev, 0x000009ea, 0x00000000);
+ nv_icmd(dev, 0x000009eb, 0x00000000);
+ nv_icmd(dev, 0x000009ec, 0x00000000);
+ nv_icmd(dev, 0x000009ed, 0x00000000);
+ nv_icmd(dev, 0x000009ee, 0x00000000);
+ nv_icmd(dev, 0x000009ef, 0x00000000);
+ nv_icmd(dev, 0x000009f0, 0x00000000);
+ nv_icmd(dev, 0x000009f1, 0x00000000);
+ nv_icmd(dev, 0x000009f2, 0x00000000);
+ nv_icmd(dev, 0x000009f3, 0x00000000);
+ nv_icmd(dev, 0x000009f4, 0x00000000);
+ nv_icmd(dev, 0x000009f5, 0x00000000);
+ nv_icmd(dev, 0x000009f6, 0x00000000);
+ nv_icmd(dev, 0x000009f7, 0x00000000);
+ nv_icmd(dev, 0x000009f8, 0x00000000);
+ nv_icmd(dev, 0x000009f9, 0x00000000);
+ nv_icmd(dev, 0x000009fa, 0x00000000);
+ nv_icmd(dev, 0x000009fb, 0x00000000);
+ nv_icmd(dev, 0x000009fc, 0x00000000);
+ nv_icmd(dev, 0x000009fd, 0x00000000);
+ nv_icmd(dev, 0x000009fe, 0x00000000);
+ nv_icmd(dev, 0x000009ff, 0x00000000);
+ nv_icmd(dev, 0x00000468, 0x00000004);
+ nv_icmd(dev, 0x0000046c, 0x00000001);
+ nv_icmd(dev, 0x00000470, 0x00000000);
+ nv_icmd(dev, 0x00000471, 0x00000000);
+ nv_icmd(dev, 0x00000472, 0x00000000);
+ nv_icmd(dev, 0x00000473, 0x00000000);
+ nv_icmd(dev, 0x00000474, 0x00000000);
+ nv_icmd(dev, 0x00000475, 0x00000000);
+ nv_icmd(dev, 0x00000476, 0x00000000);
+ nv_icmd(dev, 0x00000477, 0x00000000);
+ nv_icmd(dev, 0x00000478, 0x00000000);
+ nv_icmd(dev, 0x00000479, 0x00000000);
+ nv_icmd(dev, 0x0000047a, 0x00000000);
+ nv_icmd(dev, 0x0000047b, 0x00000000);
+ nv_icmd(dev, 0x0000047c, 0x00000000);
+ nv_icmd(dev, 0x0000047d, 0x00000000);
+ nv_icmd(dev, 0x0000047e, 0x00000000);
+ nv_icmd(dev, 0x0000047f, 0x00000000);
+ nv_icmd(dev, 0x00000480, 0x00000000);
+ nv_icmd(dev, 0x00000481, 0x00000000);
+ nv_icmd(dev, 0x00000482, 0x00000000);
+ nv_icmd(dev, 0x00000483, 0x00000000);
+ nv_icmd(dev, 0x00000484, 0x00000000);
+ nv_icmd(dev, 0x00000485, 0x00000000);
+ nv_icmd(dev, 0x00000486, 0x00000000);
+ nv_icmd(dev, 0x00000487, 0x00000000);
+ nv_icmd(dev, 0x00000488, 0x00000000);
+ nv_icmd(dev, 0x00000489, 0x00000000);
+ nv_icmd(dev, 0x0000048a, 0x00000000);
+ nv_icmd(dev, 0x0000048b, 0x00000000);
+ nv_icmd(dev, 0x0000048c, 0x00000000);
+ nv_icmd(dev, 0x0000048d, 0x00000000);
+ nv_icmd(dev, 0x0000048e, 0x00000000);
+ nv_icmd(dev, 0x0000048f, 0x00000000);
+ nv_icmd(dev, 0x00000490, 0x00000000);
+ nv_icmd(dev, 0x00000491, 0x00000000);
+ nv_icmd(dev, 0x00000492, 0x00000000);
+ nv_icmd(dev, 0x00000493, 0x00000000);
+ nv_icmd(dev, 0x00000494, 0x00000000);
+ nv_icmd(dev, 0x00000495, 0x00000000);
+ nv_icmd(dev, 0x00000496, 0x00000000);
+ nv_icmd(dev, 0x00000497, 0x00000000);
+ nv_icmd(dev, 0x00000498, 0x00000000);
+ nv_icmd(dev, 0x00000499, 0x00000000);
+ nv_icmd(dev, 0x0000049a, 0x00000000);
+ nv_icmd(dev, 0x0000049b, 0x00000000);
+ nv_icmd(dev, 0x0000049c, 0x00000000);
+ nv_icmd(dev, 0x0000049d, 0x00000000);
+ nv_icmd(dev, 0x0000049e, 0x00000000);
+ nv_icmd(dev, 0x0000049f, 0x00000000);
+ nv_icmd(dev, 0x000004a0, 0x00000000);
+ nv_icmd(dev, 0x000004a1, 0x00000000);
+ nv_icmd(dev, 0x000004a2, 0x00000000);
+ nv_icmd(dev, 0x000004a3, 0x00000000);
+ nv_icmd(dev, 0x000004a4, 0x00000000);
+ nv_icmd(dev, 0x000004a5, 0x00000000);
+ nv_icmd(dev, 0x000004a6, 0x00000000);
+ nv_icmd(dev, 0x000004a7, 0x00000000);
+ nv_icmd(dev, 0x000004a8, 0x00000000);
+ nv_icmd(dev, 0x000004a9, 0x00000000);
+ nv_icmd(dev, 0x000004aa, 0x00000000);
+ nv_icmd(dev, 0x000004ab, 0x00000000);
+ nv_icmd(dev, 0x000004ac, 0x00000000);
+ nv_icmd(dev, 0x000004ad, 0x00000000);
+ nv_icmd(dev, 0x000004ae, 0x00000000);
+ nv_icmd(dev, 0x000004af, 0x00000000);
+ nv_icmd(dev, 0x000004b0, 0x00000000);
+ nv_icmd(dev, 0x000004b1, 0x00000000);
+ nv_icmd(dev, 0x000004b2, 0x00000000);
+ nv_icmd(dev, 0x000004b3, 0x00000000);
+ nv_icmd(dev, 0x000004b4, 0x00000000);
+ nv_icmd(dev, 0x000004b5, 0x00000000);
+ nv_icmd(dev, 0x000004b6, 0x00000000);
+ nv_icmd(dev, 0x000004b7, 0x00000000);
+ nv_icmd(dev, 0x000004b8, 0x00000000);
+ nv_icmd(dev, 0x000004b9, 0x00000000);
+ nv_icmd(dev, 0x000004ba, 0x00000000);
+ nv_icmd(dev, 0x000004bb, 0x00000000);
+ nv_icmd(dev, 0x000004bc, 0x00000000);
+ nv_icmd(dev, 0x000004bd, 0x00000000);
+ nv_icmd(dev, 0x000004be, 0x00000000);
+ nv_icmd(dev, 0x000004bf, 0x00000000);
+ nv_icmd(dev, 0x000004c0, 0x00000000);
+ nv_icmd(dev, 0x000004c1, 0x00000000);
+ nv_icmd(dev, 0x000004c2, 0x00000000);
+ nv_icmd(dev, 0x000004c3, 0x00000000);
+ nv_icmd(dev, 0x000004c4, 0x00000000);
+ nv_icmd(dev, 0x000004c5, 0x00000000);
+ nv_icmd(dev, 0x000004c6, 0x00000000);
+ nv_icmd(dev, 0x000004c7, 0x00000000);
+ nv_icmd(dev, 0x000004c8, 0x00000000);
+ nv_icmd(dev, 0x000004c9, 0x00000000);
+ nv_icmd(dev, 0x000004ca, 0x00000000);
+ nv_icmd(dev, 0x000004cb, 0x00000000);
+ nv_icmd(dev, 0x000004cc, 0x00000000);
+ nv_icmd(dev, 0x000004cd, 0x00000000);
+ nv_icmd(dev, 0x000004ce, 0x00000000);
+ nv_icmd(dev, 0x000004cf, 0x00000000);
+ nv_icmd(dev, 0x00000510, 0x3f800000);
+ nv_icmd(dev, 0x00000511, 0x3f800000);
+ nv_icmd(dev, 0x00000512, 0x3f800000);
+ nv_icmd(dev, 0x00000513, 0x3f800000);
+ nv_icmd(dev, 0x00000514, 0x3f800000);
+ nv_icmd(dev, 0x00000515, 0x3f800000);
+ nv_icmd(dev, 0x00000516, 0x3f800000);
+ nv_icmd(dev, 0x00000517, 0x3f800000);
+ nv_icmd(dev, 0x00000518, 0x3f800000);
+ nv_icmd(dev, 0x00000519, 0x3f800000);
+ nv_icmd(dev, 0x0000051a, 0x3f800000);
+ nv_icmd(dev, 0x0000051b, 0x3f800000);
+ nv_icmd(dev, 0x0000051c, 0x3f800000);
+ nv_icmd(dev, 0x0000051d, 0x3f800000);
+ nv_icmd(dev, 0x0000051e, 0x3f800000);
+ nv_icmd(dev, 0x0000051f, 0x3f800000);
+ nv_icmd(dev, 0x00000520, 0x000002b6);
+ nv_icmd(dev, 0x00000529, 0x00000001);
+ nv_icmd(dev, 0x00000530, 0xffff0000);
+ nv_icmd(dev, 0x00000531, 0xffff0000);
+ nv_icmd(dev, 0x00000532, 0xffff0000);
+ nv_icmd(dev, 0x00000533, 0xffff0000);
+ nv_icmd(dev, 0x00000534, 0xffff0000);
+ nv_icmd(dev, 0x00000535, 0xffff0000);
+ nv_icmd(dev, 0x00000536, 0xffff0000);
+ nv_icmd(dev, 0x00000537, 0xffff0000);
+ nv_icmd(dev, 0x00000538, 0xffff0000);
+ nv_icmd(dev, 0x00000539, 0xffff0000);
+ nv_icmd(dev, 0x0000053a, 0xffff0000);
+ nv_icmd(dev, 0x0000053b, 0xffff0000);
+ nv_icmd(dev, 0x0000053c, 0xffff0000);
+ nv_icmd(dev, 0x0000053d, 0xffff0000);
+ nv_icmd(dev, 0x0000053e, 0xffff0000);
+ nv_icmd(dev, 0x0000053f, 0xffff0000);
+ nv_icmd(dev, 0x00000585, 0x0000003f);
+ nv_icmd(dev, 0x00000576, 0x00000003);
+ nv_icmd(dev, 0x00000586, 0x00000040);
+ nv_icmd(dev, 0x00000582, 0x00000080);
+ nv_icmd(dev, 0x00000583, 0x00000080);
+ nv_icmd(dev, 0x000005c2, 0x00000001);
+ nv_icmd(dev, 0x00000638, 0x00000001);
+ nv_icmd(dev, 0x00000639, 0x00000001);
+ nv_icmd(dev, 0x0000063a, 0x00000002);
+ nv_icmd(dev, 0x0000063b, 0x00000001);
+ nv_icmd(dev, 0x0000063c, 0x00000001);
+ nv_icmd(dev, 0x0000063d, 0x00000002);
+ nv_icmd(dev, 0x0000063e, 0x00000001);
+ nv_icmd(dev, 0x000008b8, 0x00000001);
+ nv_icmd(dev, 0x000008b9, 0x00000001);
+ nv_icmd(dev, 0x000008ba, 0x00000001);
+ nv_icmd(dev, 0x000008bb, 0x00000001);
+ nv_icmd(dev, 0x000008bc, 0x00000001);
+ nv_icmd(dev, 0x000008bd, 0x00000001);
+ nv_icmd(dev, 0x000008be, 0x00000001);
+ nv_icmd(dev, 0x000008bf, 0x00000001);
+ nv_icmd(dev, 0x00000900, 0x00000001);
+ nv_icmd(dev, 0x00000901, 0x00000001);
+ nv_icmd(dev, 0x00000902, 0x00000001);
+ nv_icmd(dev, 0x00000903, 0x00000001);
+ nv_icmd(dev, 0x00000904, 0x00000001);
+ nv_icmd(dev, 0x00000905, 0x00000001);
+ nv_icmd(dev, 0x00000906, 0x00000001);
+ nv_icmd(dev, 0x00000907, 0x00000001);
+ nv_icmd(dev, 0x00000908, 0x00000002);
+ nv_icmd(dev, 0x00000909, 0x00000002);
+ nv_icmd(dev, 0x0000090a, 0x00000002);
+ nv_icmd(dev, 0x0000090b, 0x00000002);
+ nv_icmd(dev, 0x0000090c, 0x00000002);
+ nv_icmd(dev, 0x0000090d, 0x00000002);
+ nv_icmd(dev, 0x0000090e, 0x00000002);
+ nv_icmd(dev, 0x0000090f, 0x00000002);
+ nv_icmd(dev, 0x00000910, 0x00000001);
+ nv_icmd(dev, 0x00000911, 0x00000001);
+ nv_icmd(dev, 0x00000912, 0x00000001);
+ nv_icmd(dev, 0x00000913, 0x00000001);
+ nv_icmd(dev, 0x00000914, 0x00000001);
+ nv_icmd(dev, 0x00000915, 0x00000001);
+ nv_icmd(dev, 0x00000916, 0x00000001);
+ nv_icmd(dev, 0x00000917, 0x00000001);
+ nv_icmd(dev, 0x00000918, 0x00000001);
+ nv_icmd(dev, 0x00000919, 0x00000001);
+ nv_icmd(dev, 0x0000091a, 0x00000001);
+ nv_icmd(dev, 0x0000091b, 0x00000001);
+ nv_icmd(dev, 0x0000091c, 0x00000001);
+ nv_icmd(dev, 0x0000091d, 0x00000001);
+ nv_icmd(dev, 0x0000091e, 0x00000001);
+ nv_icmd(dev, 0x0000091f, 0x00000001);
+ nv_icmd(dev, 0x00000920, 0x00000002);
+ nv_icmd(dev, 0x00000921, 0x00000002);
+ nv_icmd(dev, 0x00000922, 0x00000002);
+ nv_icmd(dev, 0x00000923, 0x00000002);
+ nv_icmd(dev, 0x00000924, 0x00000002);
+ nv_icmd(dev, 0x00000925, 0x00000002);
+ nv_icmd(dev, 0x00000926, 0x00000002);
+ nv_icmd(dev, 0x00000927, 0x00000002);
+ nv_icmd(dev, 0x00000928, 0x00000001);
+ nv_icmd(dev, 0x00000929, 0x00000001);
+ nv_icmd(dev, 0x0000092a, 0x00000001);
+ nv_icmd(dev, 0x0000092b, 0x00000001);
+ nv_icmd(dev, 0x0000092c, 0x00000001);
+ nv_icmd(dev, 0x0000092d, 0x00000001);
+ nv_icmd(dev, 0x0000092e, 0x00000001);
+ nv_icmd(dev, 0x0000092f, 0x00000001);
+ nv_icmd(dev, 0x00000648, 0x00000001);
+ nv_icmd(dev, 0x00000649, 0x00000001);
+ nv_icmd(dev, 0x0000064a, 0x00000001);
+ nv_icmd(dev, 0x0000064b, 0x00000001);
+ nv_icmd(dev, 0x0000064c, 0x00000001);
+ nv_icmd(dev, 0x0000064d, 0x00000001);
+ nv_icmd(dev, 0x0000064e, 0x00000001);
+ nv_icmd(dev, 0x0000064f, 0x00000001);
+ nv_icmd(dev, 0x00000650, 0x00000001);
+ nv_icmd(dev, 0x00000658, 0x0000000f);
+ nv_icmd(dev, 0x000007ff, 0x0000000a);
+ nv_icmd(dev, 0x0000066a, 0x40000000);
+ nv_icmd(dev, 0x0000066b, 0x10000000);
+ nv_icmd(dev, 0x0000066c, 0xffff0000);
+ nv_icmd(dev, 0x0000066d, 0xffff0000);
+ nv_icmd(dev, 0x000007af, 0x00000008);
+ nv_icmd(dev, 0x000007b0, 0x00000008);
+ nv_icmd(dev, 0x000007f6, 0x00000001);
+ nv_icmd(dev, 0x000006b2, 0x00000055);
+ nv_icmd(dev, 0x000007ad, 0x00000003);
+ nv_icmd(dev, 0x00000937, 0x00000001);
+ nv_icmd(dev, 0x00000971, 0x00000008);
+ nv_icmd(dev, 0x00000972, 0x00000040);
+ nv_icmd(dev, 0x00000973, 0x0000012c);
+ nv_icmd(dev, 0x0000097c, 0x00000040);
+ nv_icmd(dev, 0x00000979, 0x00000003);
+ nv_icmd(dev, 0x00000975, 0x00000020);
+ nv_icmd(dev, 0x00000976, 0x00000001);
+ nv_icmd(dev, 0x00000977, 0x00000020);
+ nv_icmd(dev, 0x00000978, 0x00000001);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095e, 0x20164010);
+ nv_icmd(dev, 0x0000095f, 0x00000020);
+ nv_icmd(dev, 0x00000683, 0x00000006);
+ nv_icmd(dev, 0x00000685, 0x003fffff);
+ nv_icmd(dev, 0x00000687, 0x00000c48);
+ nv_icmd(dev, 0x000006a0, 0x00000005);
+ nv_icmd(dev, 0x00000840, 0x00300008);
+ nv_icmd(dev, 0x00000841, 0x04000080);
+ nv_icmd(dev, 0x00000842, 0x00300008);
+ nv_icmd(dev, 0x00000843, 0x04000080);
+ nv_icmd(dev, 0x00000818, 0x00000000);
+ nv_icmd(dev, 0x00000819, 0x00000000);
+ nv_icmd(dev, 0x0000081a, 0x00000000);
+ nv_icmd(dev, 0x0000081b, 0x00000000);
+ nv_icmd(dev, 0x0000081c, 0x00000000);
+ nv_icmd(dev, 0x0000081d, 0x00000000);
+ nv_icmd(dev, 0x0000081e, 0x00000000);
+ nv_icmd(dev, 0x0000081f, 0x00000000);
+ nv_icmd(dev, 0x00000848, 0x00000000);
+ nv_icmd(dev, 0x00000849, 0x00000000);
+ nv_icmd(dev, 0x0000084a, 0x00000000);
+ nv_icmd(dev, 0x0000084b, 0x00000000);
+ nv_icmd(dev, 0x0000084c, 0x00000000);
+ nv_icmd(dev, 0x0000084d, 0x00000000);
+ nv_icmd(dev, 0x0000084e, 0x00000000);
+ nv_icmd(dev, 0x0000084f, 0x00000000);
+ nv_icmd(dev, 0x00000850, 0x00000000);
+ nv_icmd(dev, 0x00000851, 0x00000000);
+ nv_icmd(dev, 0x00000852, 0x00000000);
+ nv_icmd(dev, 0x00000853, 0x00000000);
+ nv_icmd(dev, 0x00000854, 0x00000000);
+ nv_icmd(dev, 0x00000855, 0x00000000);
+ nv_icmd(dev, 0x00000856, 0x00000000);
+ nv_icmd(dev, 0x00000857, 0x00000000);
+ nv_icmd(dev, 0x00000738, 0x00000000);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ab, 0x00000002);
+ nv_icmd(dev, 0x000006ac, 0x00000080);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x000006bb, 0x000000cf);
+ nv_icmd(dev, 0x000006ce, 0x2a712488);
+ nv_icmd(dev, 0x00000739, 0x4085c000);
+ nv_icmd(dev, 0x0000073a, 0x00000080);
+ nv_icmd(dev, 0x00000786, 0x80000100);
+ nv_icmd(dev, 0x0000073c, 0x00010100);
+ nv_icmd(dev, 0x0000073d, 0x02800000);
+ nv_icmd(dev, 0x00000787, 0x000000cf);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x00000836, 0x00000001);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x00000944, 0x00000022);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000002);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000014);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000001);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+ nv_wr32(dev, 0x404154, 0x00000400);
+
+ nvc0_grctx_generate_9097(dev);
+ nvc0_grctx_generate_902d(dev);
+ nvc0_grctx_generate_9039(dev);
+ nvc0_grctx_generate_90c0(dev);
+
+ nv_wr32(dev, 0x000260, r000260);
+ return 0;
+}
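The nv_icmd() run above replays a fixed golden state into the graph-class registers before the per-class contexts are generated. A minimal sketch of the same sequence in table-driven form, assuming a hypothetical icmd_table[] helper (not part of the patch; nv_icmd() is the helper used above):

struct icmd_entry { u32 addr; u32 data; };

/* First few entries copied from the writes above; the rest elided. */
static const struct icmd_entry icmd_table[] = {
	{ 0x00000650, 0x00000001 },
	{ 0x00000658, 0x0000000f },
	{ 0x000007ff, 0x0000000a },
	/* ... */
};

static void
icmd_run(struct drm_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(icmd_table); i++)
		nv_icmd(dev, icmd_table[i].addr, icmd_table[i].data);
}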
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78a908..c0909174905 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,206 +25,207 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+struct nvc0_instmem_priv {
+ struct nouveau_gpuobj *bar1_pgd;
+ struct nouveau_channel *bar1;
+ struct nouveau_gpuobj *bar3_pgd;
+ struct nouveau_channel *bar3;
+ struct nouveau_gpuobj *chan_pgd;
+};
int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *size)
+nvc0_instmem_suspend(struct drm_device *dev)
{
- int ret;
-
- *size = ALIGN(*size, 4096);
- if (*size == 0)
- return -EINVAL;
-
- ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
- if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
- }
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ dev_priv->ramin_available = false;
return 0;
}
void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
- }
+ nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+ nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+ dev_priv->ramin_available = true;
}
-int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+ struct nouveau_channel *chan;
+
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
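nvc0_channel_del() takes the caller's pointer, clears it first, and tolerates NULL, so every error path can call it unconditionally. A usage sketch under those assumptions (the 256MiB vm_size is illustrative):

static int
example_channel_setup(struct drm_device *dev, struct nouveau_vm *vm,
		      struct nouveau_gpuobj *pgd)
{
	struct nouveau_channel *chan = NULL;
	int ret;

	ret = nvc0_channel_new(dev, 8192, vm, &chan, pgd, 256ULL << 20);
	if (ret)
		nvc0_channel_del(&chan);	/* no-op when chan is still NULL */
	return ret;
}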
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- vram = gpuobj->vinst;
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan,
+ struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+ struct nouveau_channel *chan;
+ int ret;
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- vram += 4096;
- pte++;
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- if (1) {
- u32 chan = nv_rd32(dev, 0x1700) << 16;
- nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
- nv_wr32(dev, 0x100cbc, 0x80000005);
+ ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- gpuobj->im_bound = 1;
- return 0;
-}
-
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), 0);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- pte++;
+ ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- gpuobj->im_bound = 0;
- return 0;
-}
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+ nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
- nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
+ *pchan = chan;
+ return 0;
}
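The four nv_wo32() writes above initialise the channel's instance block: words 0x200/0x204 hold the page-directory address and 0x208/0x20c the VM limit, each split into 32-bit halves. A worked example with assumed values:

/* Assuming pgd->vinst = 0x20000 and vm_size = 1GiB:
 *   0x0200 <- lower_32_bits(0x20000)     = 0x00020000
 *   0x0204 <- upper_32_bits(0x20000)     = 0x00000000
 *   0x0208 <- lower_32_bits(0x3fffffff)  = 0x3fffffff
 *   0x020c <- upper_32_bits(0x3fffffff)  = 0x00000000
 */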
int
-nvc0_instmem_suspend(struct drm_device *dev)
+nvc0_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf;
- int i;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvc0_instmem_priv *priv;
+ struct nouveau_vm *vm = NULL;
+ int ret;
- dev_priv->susres.ramin_copy = vmalloc(65536);
- if (!dev_priv->susres.ramin_copy)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- buf = dev_priv->susres.ramin_copy;
-
- for (i = 0; i < 65536; i += 4)
- buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ pinstmem->priv = priv;
+
+ /* BAR3 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL,
+ (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+ NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+ priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+ if (ret)
+ goto error;
+
+ /* BAR1 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+ priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+ if (ret)
+ goto error;
+
+ /* channel vm */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+ if (ret)
+ goto error;
+
+ nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ nvc0_instmem_resume(dev);
return 0;
+error:
+ nvc0_instmem_takedown(dev);
+ return ret;
}
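Initialisation builds three address spaces: a BAR3 VM whose single page table is allocated and mapped up front (so instmem access works before the allocator does), a BAR1 VM for the framebuffer aperture, and the shared channel VM. The BARs themselves are bound in nvc0_instmem_resume() by writing the channel's instance-block address in 4KiB units with flag bits on top; a sketch of that assumed encoding:

/* Hypothetical helper mirroring the writes in nvc0_instmem_resume(). */
static inline u32
bar_bind_value(u32 flags, u64 inst_vinst)
{
	return flags | (u32)(inst_vinst >> 12);	/* 0x80000000: BAR1, 0xc0000000: BAR3 */
}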
void
-nvc0_instmem_resume(struct drm_device *dev)
+nvc0_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf = dev_priv->susres.ramin_copy;
- u64 chan;
- int i;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_vm *vm = NULL;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- nv_wr32(dev, 0x001700, chan >> 16);
+ nvc0_instmem_suspend(dev);
- for (i = 0; i < 65536; i += 4)
- nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
+ nv_wr32(dev, 0x1704, 0x00000000);
+ nv_wr32(dev, 0x1714, 0x00000000);
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-}
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
-int
-nvc0_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
- int ret, i;
-
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- imem = 4096 + 4096 + 32768;
-
- nv_wr32(dev, 0x001700, chan >> 16);
-
- /* channel setup */
- nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700208, lower_32_bits(lim3));
- nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
- /* point pgd -> pgt */
- nv_wr32(dev, 0x701000, 0);
- nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
- /* point pgt -> physical vram for channel */
- pgt3 = 0x2000;
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* clear rest of pgt */
- for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, 0);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* point bar3 at the channel */
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-
- /* Global PRAMIN heap */
- ret = drm_mm_init(&dev_priv->ramin_heap, imem,
- dev_priv->ramin_size - imem);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
- }
+ nvc0_channel_del(&priv->bar1);
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
- return 0;
-}
+ nvc0_channel_del(&priv->bar3);
+ nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+ nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 00000000000..4b9251bb0ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+ struct nouveau_gpuobj *pgt[2])
+{
+ u32 pde[2] = { 0, 0 };
+
+ if (pgt[0])
+ pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+ if (pgt[1])
+ pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+ nv_wo32(pgd, (index * 8) + 0, pde[0]);
+ nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
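Each PDE is eight bytes holding two page-table pointers, one per page size; note the cross-over above, where pgt[0] lands in the second word and pgt[1] in the first. A worked example of the assumed pointer encoding:

/* A page table at VRAM address 0x40000 encodes as
 *   0x00000001 | (0x40000 >> 8) = 0x00000401
 * i.e. the address in 256-byte units with bit 0 as the present bit. */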
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+ phys >>= 8;
+
+ phys |= 0x00000001; /* present */
+// if (vma->access & NV_MEM_ACCESS_SYS)
+// phys |= 0x00000002;
+
+ phys |= ((u64)target << 32);
+ phys |= ((u64)memtype << 36);
+
+ return phys;
+}
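Under the assumed PTE layout, the low word carries the physical address in 256-byte units plus a present bit, while the high word carries the target aperture (bits 32:35) and memory type (bits 36 and up). A worked example:

/* phys = 0x10000000, memtype = 0:
 *   VRAM (target 0):          PTE = 0x00000000 00100001
 *   system memory (target 5): PTE = 0x00000005 00100001
 * low word = (phys >> 8) | 1, high word = target | (memtype << 4). */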
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 next = 1 << (vma->node->type - 8);
+
+ phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ phys += next;
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
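Where nvc0_vm_map() strides through contiguous VRAM by the node's page size, nvc0_vm_map_sg() writes one PTE per scattered system page, with target 5 assumed to select system memory. A usage sketch with hypothetical, page-aligned bus addresses:

dma_addr_t pages[3] = { 0x80000000, 0x80002000, 0x80005000 };

/* Fill three PTEs starting at index 0 of the given page table. */
nvc0_vm_map_sg(vma, pgt, 0, pages, 3);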
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct drm_device *dev = vm->dev;
+ struct nouveau_vm_pgd *vpgd;
+ u32 r100c80, engine;
+
+ pinstmem->flush(vm->dev);
+
+ if (vm == dev_priv->chan_vm)
+ engine = 1;
+ else
+ engine = 5;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ r100c80 = nv_rd32(dev, 0x100c80);
+ nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+ if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
+ NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ }
+}
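The flush handshake snapshots 0x100c80, names the page directory via 0x100cb8 (address in 256-byte units), kicks the flush through 0x100cbc with an engine selector (1 for the channel VM, 5 otherwise, per the test above), then waits for 0x100c80 to return to the snapshot. A minimal sketch of that sequence factored into a helper (an assumption, not part of the patch):

static void
vm_flush_pgd(struct drm_device *dev, u64 pgd_vinst, u32 engine)
{
	u32 token = nv_rd32(dev, 0x100c80);		/* value to wait for */

	nv_wr32(dev, 0x100cb8, pgd_vinst >> 8);		/* which page directory */
	nv_wr32(dev, 0x100cbc, 0x80000000 | engine);	/* kick the flush */
	if (!nv_wait(dev, 0x100c80, 0xffffffff, token))
		NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
}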
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 00000000000..858eda5dedd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+ case 0x0000:
+ case 0xfe00:
+ case 0xdb00:
+ case 0x1100:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ size >>= 12;
+ align >>= 12;
+ ncmin >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
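Sizes are converted to 4KiB page units up front, and the request may be satisfied by several contiguous regions chained on vram->regions; vram->offset reports only the first region's address. A usage sketch with hypothetical sizes:

/* Allocate 1MiB of VRAM, 4KiB-aligned, allowing 4KiB minimum regions. */
struct nouveau_vram *vram = NULL;
int ret = nvc0_vram_new(dev, 1 << 20, 1 << 12, 1 << 12, 0, &vram);
if (ret)
	return ret;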
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ dev_priv->vram_rblock_size = 4096;
+ return 0;
+}
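The size probe assumes 0x10f20c reports memory per partition in MiB and 0x121c74 the partition count; a worked example under that assumption:

/* 0x10f20c = 256 (MiB per partition), 0x121c74 = 6 partitions:
 *   vram_size = (256 << 20) * 6 = 1536MiB */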
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a58561..fe0f253089a 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
#define NV_PCRTC_START 0x00600800
#define NV_PCRTC_CONFIG 0x00600804
# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
-# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
+# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0)
+# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
#define NV_PCRTC_CURSOR_CONFIG 0x00600810
# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2028d..e47eecfc2df 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_trace_points.o ni.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179d1bf..c61c3fe9fb9 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
#define GRAPH_OBJECT_TYPE_ROUTER 0x4
/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
+#define GRAPH_OBJECT_TYPE_GENERIC 0x7
/****************************************************/
/* Encoder Object ID Definition */
@@ -64,6 +66,9 @@
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
+#define ENCODER_OBJECT_ID_ALMOND 0x22
+#define ENCODER_OBJECT_ID_TRAVIS 0x23
+#define ENCODER_OBJECT_ID_NUTMEG 0x22
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -108,6 +113,7 @@
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
/* deleted */
@@ -124,6 +130,7 @@
#define GENERIC_OBJECT_ID_GLSYNC 0x01
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object can show up in the Misc Object table; it follows the ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -360,6 +367,26 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
@@ -421,6 +448,14 @@
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
+#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
@@ -621,6 +665,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b9f13..258fa5e7a2d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
- uint32_t dst, src1, src2, saved;
+ uint32_t dst, mask, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- SDEBUG(" src1: ");
- src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
- SDEBUG(" src2: ");
- src2 = atom_get_src(ctx, attr, ptr);
- dst &= src1;
- dst |= src2;
+ mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+ SDEBUG(" mask: 0x%08x", mask);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= mask;
+ dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
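The renamed operands spell out the opcode's effect: keep only the destination bits selected by the immediate mask, then merge in the source. Equivalently:

static uint32_t
atom_mask_value(uint32_t dst, uint32_t mask, uint32_t src)
{
	return (dst & mask) | src;	/* keep masked bits, overlay source */
}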
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a239df..58a0cd02c0a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
+#define ATOM_PPLL0 2
+#define ATOM_EXT_PLL1 8
+#define ATOM_EXT_PLL2 9
+#define ATOM_EXT_CLOCK 10
#define ATOM_PPLL_INVALID 0xFF
+#define ENCODER_REFCLK_SRC_P1PLL 0
+#define ENCODER_REFCLK_SRC_P2PLL 1
+#define ENCODER_REFCLK_SRC_DCPLL 2
+#define ENCODER_REFCLK_SRC_EXTCLK 3
+#define ENCODER_REFCLK_SRC_INVALID 0xFF
+
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER
/*Image can't be updated, while Driver needs to carry the new table! */
}ATOM_COMMON_TABLE_HEADER;
+/****************************************************************************/
+// Structure stores the ROM header.
+/****************************************************************************/
typedef struct _ATOM_ROM_HEADER
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER
#define USHORT void*
#endif
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
+#define GetDispObjectInfo EnableYUV
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
+#define ADJUST_MC_SETTING_PARAM 3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulClockFreq:24;
+#else
+ ULONG ulClockFreq:24;
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG 0x80
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ union
+ {
+ UCHAR ucCntlFlag; //Output Flags
+ UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+ };
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_ENCODER_CONFIG_LINKA 0x00
#define ATOM_ENCODER_CONFIG_LINKB 0x04
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
+#define ATOM_ENCODER_MODE_DVO 16
+#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
{
@@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
@@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved:3;
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
UCHAR ucReserved:3;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V3;
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
-
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
{
@@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
UCHAR ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved:2;
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= changed compared to the previous version
+#else
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= changed compared to the previous version
+ UCHAR ucReserved:2;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+ USHORT usPixelClock; // in 10KHz units, for BIOS convenience
+ union{
+ ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucHPD_ID; // HPD ID (1-6). =0 means skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
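A hypothetical fill-in of the V4 parameter block for a 4-lane DP link at 5.4Gbps on DIG2; every value below is illustrative, not taken from the patch:

DIG_ENCODER_CONTROL_PARAMETERS_V4 args = { 0 };

args.usPixelClock = 27000;		/* 270MHz, in 10KHz units */
args.acConfig.ucDigSel = 2;		/* DIG2 encoder */
args.acConfig.ucDPLinkRate = 2;		/* =2: 5.4Ghz */
args.ucEncoderMode = 0;			/* =0: DP encoder */
args.ucLaneNum = 4;
args.ucHPD_ID = 1;			/* 0 would skip HPD programming */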
// define ucBitPerColor:
#define PANEL_BPC_UNDEFINE 0x00
@@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
{
union
@@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+ UCHAR ucLaneSel;
+ union
+ {
+ UCHAR ucLaneSet;
+ struct {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+#else
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+#endif
+ };
+ };
+}ATOM_DP_VS_MODE_V4;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz units, for BIOS convenience
+ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
+ ATOM_DP_VS_MODE_V4 asMode; // DP voltage-swing mode, redefined compared to the previous version
+ };
+ union
+ {
+ ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
+
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+ union{
+ USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
+ USHORT usConnectorId; // connector id, valid when ucAction = INIT
+ };
+ UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucAction; //
+ UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+ UCHAR ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+ ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
/****************************************************************************/
// Structures used by DAC1OutputControlTable
// DAC2OutputControlTable
@@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2
#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
+
typedef struct _PIXEL_CLOCK_PARAMETERS_V3
{
USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5
#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+ union{
+ CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
+ ULONG ulDispEngClkFreq; // dispclk frequency
+ };
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTLAIN( default mode )
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
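A sketch of decoding the HDMI bit-depth field of ucMiscInfo using the masks above; the mapping follows these defines (24/30/36/48bpp), which differ from the in-struct comment, so treat it as an assumption:

static int
hdmi_bpp_from_misc(UCHAR misc)
{
	switch (misc & PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK) {
	case PIXEL_CLOCK_V6_MISC_HDMI_24BPP: return 24;
	case PIXEL_CLOCK_V6_MISC_HDMI_30BPP: return 30;
	case PIXEL_CLOCK_V6_MISC_HDMI_36BPP: return 36;
	default:                             return 48;	/* PIXEL_CLOCK_V6_MISC_HDMI_48BPP */
	}
}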
+
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
{
USHORT usPixelClock; // target pixel clock
- UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
- UCHAR ucReserved[3];
+ UCHAR ucExtTransmitterID; // external encoder id.
+ UCHAR ucReserved[2];
}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
// usDispPllConfig v1.2 for RoadRunner
@@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
/****************************************************************************/
// Structures used by PowerConnectorDetectionTable
/****************************************************************************/
@@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+ USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
+
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
/**************************************************************************/
@@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT DAC_Info; // Will be obsolete from R600
- USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT TMDS_Info; // Will be obsolete from R600
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
@@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
+// For backward compatible
+#define LVDS_Info LCD_Info
+
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
+
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
@@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;
+
/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/
@@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1
UCHAR ucReserved4[3];
}ATOM_FIRMWARE_INFO_V2_1;
+//the structure below to be used from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved[2];
+ ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+ ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
+ UCHAR ucReserved3; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz units; if 0, the UNIPHY input clock in DP mode comes from the internal PPLL, otherwise from the external spread clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved9[3];
+ USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ USHORT usReserved12;
+ ULONG ulReserved10[3]; // New added comparing to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
-ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
@@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
+#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
+
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
@@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12
#define PANEL_RANDOM_DITHER 0x80
#define PANEL_RANDOM_DITHER_MASK 0x80
+#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
+
+/****************************************************************************/
+// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+typedef struct _ATOM_LCD_INFO_V13
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ ULONG ulReserved0;
+ UCHAR ucLCD_Misc; // Reorganized in V13
+ // Bit0: {=0:single, =1:dual},
+ // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
+ // Bit3:2: {Grey level}
+ // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
+ // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
+ // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+ // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+ // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+ // Bit7-3: Reserved
+ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+ USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+ ULONG ulReserved[4];
+}ATOM_LCD_INFO_V13;
+
+#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID, unchanged compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED, unchanged compared to the previous version
-#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP, unchanged compared to the previous version
typedef struct _ATOM_PATCH_RECORD_MODE
{
@@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO
#define MAX_DTD_MODE_IN_VRAM 6
#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
-#define DFP_ENCODER_TYPE_OFFSET 0x80
-#define DP_ENCODER_LANE_NUM_OFFSET 0x84
-#define DP_ENCODER_LINK_RATE_OFFSET 0x88
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
+#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
#define ATOM_HWICON1_SURFACE_ADDR 0
#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
-#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
+#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512
//The size below is in KB!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+#define ATOM_VRAM_RESERVE_V2_SIZE 32
+
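/* Editor's sketch (not part of this patch): the ATOM_VRAM_RESERVE_SIZE macro
 * above restated as a function for clarity -- a byte span is converted to KB
 * (>>10), padded by 4 KB, then aligned down to a 4 KB multiple (&0xFFFC). */
static inline unsigned int atom_vram_reserve_kb(unsigned int start, unsigned int end)
{
	return (((end - start) >> 10) + 4) & 0xFFFC;
}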
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
@@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
 USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destined to connector.
}ATOM_DISPLAY_OBJECT_PATH;
+typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
{
UCHAR ucNumOfDispPath;
@@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
#define EXT_AUXDDC_LUTINDEX_7 7
#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Define which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Define which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Define which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Define which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDP_Lane3_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane0_Source:2;
+#else
+ UCHAR ucDP_Lane0_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping.
+//Bit[1:0]: Define which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Define which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Define which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Define which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDVI_CLK_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA2_Source:2;
+#else
+ UCHAR ucDVI_DATA2_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
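/* Editor's sketch (not part of this patch): reading one lane's source out of
 * the packed ucChannelMapping byte described above. It works for both the DP
 * and DVI layouts since every field is 2 bits wide; note that a whole-byte
 * value of 0 means the default one-to-one mapping per the EXT_DISPLAY_PATH
 * comment below. The function name is hypothetical. */
static inline UCHAR conn_lane_source(UCHAR ucChannelMapping, int lane)
{
	return (ucChannelMapping >> (2 * lane)) & 0x3; /* 0..3 => GPU TX0..TX3 */
}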
typedef struct _EXT_DISPLAY_PATH
{
USHORT usDeviceTag; //A bit vector to show what devices are supported
@@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH
UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
USHORT usExtEncoderObjId; //external encoder object id
- USHORT usReserved[3];
+ union{
+ UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+ UCHAR ucReserved;
+ USHORT usReserved[2];
}EXT_DISPLAY_PATH;
#define NUMBER_OF_UCHAR_FOR_GUID 16
@@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
- UCHAR Reserved [7]; // for potential expansion
+ UCHAR uc3DStereoPinId; // used for eDP panel
+ UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
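/* Editor's sketch (not part of this patch): validating ucChecksum above.
 * The byte sum of the whole structure is defined to be 0x0; the function
 * name is hypothetical. */
static inline int ext_disp_conn_info_checksum_ok(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
{
	const UCHAR *p = (const UCHAR *)info;
	UCHAR sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(*info); i++)
		sum += p[i];
	return sum == 0;
}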
//Related definitions, all records are different but they have a common header
@@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+#define ATOM_ENCODER_CAP_RECORD_TYPE 20
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
+
+typedef struct _ATOM_ENCODER_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ union {
+ USHORT usEncoderCap;
+ struct {
+#if ATOM_BIG_ENDIAN
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+#else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
+#endif
+ };
+ };
+}ATOM_ENCODER_CAP_RECORD;
+
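/* Editor's sketch (not part of this patch): either the usHBR2Cap bitfield or
 * a mask test on usEncoderCap can check DP1.2 HBR2 support on a path. */
static inline int encoder_cap_has_hbr2(const ATOM_ENCODER_CAP_RECORD *rec)
{
	return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
}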
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL
#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
#define VOLTAGE_CONTROL_ID_DS4402 0x04
+#define VOLTAGE_CONTROL_ID_UP6266 0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
+#define VOLTAGE_CONTROL_ID_VT1556M 0x07
+#define VOLTAGE_CONTROL_ID_CHL822x 0x08
+#define VOLTAGE_CONTROL_ID_VT1586M 0x09
typedef struct _ATOM_VOLTAGE_OBJECT
{
@@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+ ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
+ ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+ ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+ USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
+ USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
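/* Editor's sketch (not part of this patch): clocks in these tables are in
 * 10 kHz units per the comments above, so conversion to kHz is a multiply
 * by 10. The helper name is hypothetical. */
static inline ULONG avail_sclk_in_khz(const ATOM_AVAILABLE_SCLK_LIST *entry)
{
	return entry->ulSupportedSCLK * 10;
}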
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for the Llano/Ontario APUs
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
- ULONG ulReserved1[8];
+ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
- ULONG ulReserved2[3];
+ ULONG ulSB_MMIO_Base_Addr;
+ USHORT usRequestedPWMFreqInHz;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucHtcHystLmt;
+ ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- USHORT usExtDispConnInfoOffset;
- UCHAR ucHtcTmpLmt;
- UCHAR ucTjOffset;
+ USHORT usNBP0Voltage;
+ USHORT usNBP1Voltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
ULONG ulCSR_M3_ARB_CNTL_UVD[10];
ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
- ULONG ulReserved3[42];
+ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+ ULONG ulGMCRestoreResetTime;
+ ULONG ulMinimumNClk;
+ ULONG ulIdleNClk;
+ ULONG ulDDR_DLL_PowerUpTime;
+ ULONG ulDDR_PLL_PowerUpTime;
+ USHORT usPCIEClkSSPercentage;
+ USHORT usPCIEClkSSType;
+ USHORT usLvdsSSPercentage;
+ USHORT usLvdsSSpreadRateIn10Hz;
+ USHORT usHDMISSPercentage;
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+ ULONG ulReserved3[21];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
+
+
/**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
-//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-//ulReserved1[8] Reserved by now, must be 0x0.
-//ulBootUpReqDisplayVector VBIOS boot up display IDs
-// ATOM_DEVICE_CRT1_SUPPORT 0x0001
-// ATOM_DEVICE_CRT2_SUPPORT 0x0010
-// ATOM_DEVICE_DFP1_SUPPORT 0x0008
-// ATOM_DEVICE_DFP6_SUPPORT 0x0040
-// ATOM_DEVICE_DFP2_SUPPORT 0x0080
-// ATOM_DEVICE_DFP3_SUPPORT 0x0200
-// ATOM_DEVICE_DFP4_SUPPORT 0x0400
-// ATOM_DEVICE_DFP5_SUPPORT 0x0800
-// ATOM_DEVICE_LCD1_SUPPORT 0x0002
-//ulOtherDisplayMisc Other display related flags, not defined yet.
-//ulGPUCapInfo TBD
-//ulReserved2[3] must be 0x0 for the reserved.
-//ulSystemConfig TBD
-//ulCPUCapInfo TBD
-//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber System memory channel numbers.
-//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
-//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it is 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10kHz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Llano/Ontario projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_CRT2_SUPPORT 0x0010
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: Other display related flags, not defined yet.
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+ =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+ bit[3]=0: Enable HW AUX mode detection logic
+ =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+ 1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
+ VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1, as a result,
+ changing BL using VBIOS function is functional in both driver and non-driver present environment;
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD), this non-zero frequency serves as a flag only indicating
+ that BL control from GPU is expected.
+ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+ Changing BL using VBIOS function could be functional in both driver and non-driver present environment, but
+ it is platform dependent,
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+ To calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in units of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
**********************************************************************************************************************/
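/* Editor's sketch (not part of this patch): converting the spread spectrum
 * fields documented above out of their raw units -- percentages are stored
 * in 0.01% steps (100 == 1%) and spread rates in 10 Hz steps. Helper names
 * are hypothetical. */
static inline unsigned int ss_rate_in_hz(USHORT usSpreadRateIn10Hz)
{
	return usSpreadRateIn10Hz * 10;
}

static inline unsigned int ss_percentage_in_hundredths(USHORT usSSPercentage)
{
	return usSSPercentage; /* already in 0.01% units; divide by 100 for % */
}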
/**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
#define ASIC_INTERNAL_SS_ON_LVDS 6
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
//Byte aligned defintion for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80
typedef struct _ATOM_MC_INIT_PARAM_TABLE
{
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _32Mx32 0x33
#define _64Mx8 0x41
#define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define QIMONDA INFINEON
#define PROMOS MOSEL
#define KRETON INFINEON
+#define ELIXIR NANYA
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
-#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_ROM_START_ADDRESS 0x1b800
#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
//uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number, ends with '0'.
+}ATOM_VRAM_MODULE_V7;
typedef struct _ATOM_VRAM_INFO_V2
{
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
// ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usReserved[4];
+ UCHAR ucNumOfVRAMModule; // indicates the number of VRAM modules
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicates the ATOM_VRAM_MODULE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO;
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
+
typedef struct _ASIC_ENCODER_INFO
{
UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
/* obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and output parameter.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+ DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
+
+
// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
#define SELECT_DCIO_IMPCAL 4
#define SELECT_DCIO_DIG 6
#define SELECT_CRTC_PIXEL_RATE 7
+#define SELECT_VGA_BLK 8
/****************************************************************************/
//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
@@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usReserved[2];
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
+ ULONG ulReserved;
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
@@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
@@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
- UCHAR ucUnused1[3];
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
} ATOM_PPLIB_NONCLOCK_INFO;
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10kHz
+ UCHAR ucEngineClockHigh; //clockfrequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
+ //please initialize to 0
+ UCHAR rsv;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index to the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
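/* Editor's sketch (not part of this patch): ATOM_PPLIB_STATE_V2 entries are
 * variable length, so a StateArray walk has to advance by the computed size
 * given in the comment above rather than by sizeof(). The helper name is
 * hypothetical. */
static inline ATOM_PPLIB_STATE_V2 *pplib_next_state_v2(ATOM_PPLIB_STATE_V2 *state)
{
	unsigned int size = sizeof(ATOM_PPLIB_STATE_V2) +
		(state->ucNumDPMLevels - 1) * sizeof(UCHAR);
	return (ATOM_PPLIB_STATE_V2 *)((UCHAR *)state + size);
}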
+
+typedef struct ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ //this is for Sumo
+ ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
+}ClockInfoArray;
+
+typedef struct NonClockInfoArray{
+
+ //how many non-clock levels we have, normally the same as the number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
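/* Editor's sketch (not part of this patch): indexing nonClockInfo[] through
 * ucEntrySize lets one walk handle both the 12-byte VER1 and 24-byte VER2
 * layouts (see ATOM_PPLIB_NONCLOCKINFO_VER1/VER2 above). The helper name is
 * hypothetical. */
static inline ATOM_PPLIB_NONCLOCK_INFO *
nonclock_info_entry(NonClockInfoArray *arr, unsigned int idx)
{
	UCHAR *base = (UCHAR *)arr->nonClockInfo;
	return (ATOM_PPLIB_NONCLOCK_INFO *)(base + idx * arr->ucEntrySize);
}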
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
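/* Editor's sketch (not part of this patch): the clock in each dependency
 * record is split across usClockLow (low 16 bits) and ucClockHigh (high 8
 * bits), in 10 kHz units; reassembly is a shift and OR. */
static inline ULONG dep_record_clock_10khz(const ATOM_PPLIB_Clock_Voltage_Dependency_Record *rec)
{
	return ((ULONG)rec->ucClockHigh << 16) | rec->usClockLow;
}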
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
/**************************************************************************/
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa6ee4..b0ab185b86f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -403,6 +403,7 @@ union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,7 +418,30 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ args.v3.usSpreadSpectrumAmountFrac = 0;
+ args.v3.ucSpreadSpectrumType = ss->type;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_PPLL2:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_DCPLL:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ args.v3.usSpreadSpectrumAmount = 0;
+ args.v3.usSpreadSpectrumStep = 0;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ args.v3.ucEnable = enable; /* this branch fills args.v3, not args.v2 */
+ } else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type;
switch (pll_id) {
@@ -673,9 +697,14 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
+ PIXEL_CLOCK_PARAMETERS_V6 v6;
};
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+ u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +727,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
* SetPixelClock provides the dividers
*/
args.v5.ucCRTC = ATOM_CRTC_INVALID;
- args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.usPixelClock = dispclk;
args.v5.ucPpll = ATOM_DCPLL;
break;
+ case 6:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v6.ulDispEngClkFreq = dispclk;
+ args.v6.ucPpll = ATOM_DCPLL;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -784,6 +820,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucEncoderMode = encoder_mode;
args.v5.ucPpll = pll_id;
break;
+ case 6:
+ args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+ args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+ args.v6.ucRefDiv = ref_div;
+ args.v6.usFbDiv = cpu_to_le16(fb_div);
+ args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v6.ucPostDiv = post_div;
+ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v6.ucTransmitterID = encoder_id;
+ args.v6.ucEncoderMode = encoder_mode;
+ args.v6.ucPpll = pll_id;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -1377,7 +1425,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
- atombios_crtc_set_dcpll(crtc);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c361a1..7fe8ebdcdc0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,62 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
@@ -57,6 +113,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+u32 sumo_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+ u32 actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -337,16 +401,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
case 0:
case 4:
default:
- return 3840 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
case 1:
case 5:
- return 5760 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
case 2:
case 6:
- return 7680 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
case 3:
case 7:
- return 1920 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
}
}
@@ -890,31 +966,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1028,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ }
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1080,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -1057,11 +1149,17 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+ tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+ WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+ }
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
- WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
@@ -1285,11 +1383,15 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
+ case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
@@ -1384,6 +1486,46 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_HEMLOCK:
+ case CHIP_CYPRESS:
+ case CHIP_BARTS:
+ tcp_chan_steer_lo = 0x54763210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ default:
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1637,90 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
+ case CHIP_PALM:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_BARTS:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 7;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_TURKS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 6;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
}
/* Initialize HDP */
@@ -1636,6 +1862,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
+ case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
@@ -1687,6 +1914,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ evergreen_program_channel_remap(rdev);
+
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
@@ -1769,9 +1998,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
GS_PRIO(2) |
ES_PRIO(3));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
/* no vertex cache */
sq_config &= ~VC_ENABLE;
+ break;
+ default:
+ break;
+ }
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
@@ -1783,10 +2019,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
ps_thread_count = 96;
- else
+ break;
+ default:
ps_thread_count = 128;
+ break;
+ }
sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
@@ -1817,10 +2058,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
- else
+ break;
+ default:
vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ break;
+ }
vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
@@ -1904,12 +2151,18 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
- /* size in MB on evergreen */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* size in bytes on fusion */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ } else {
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ }
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
@@ -1917,8 +2170,30 @@ int evergreen_mc_init(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
- /* FIXME: implement for evergreen */
- return false;
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
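+ /* GUI idle means the engine is not stuck; refresh the lockup tracking state */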
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2011,17 +2286,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2326,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2352,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[2]) {
+ if (rdev->irq.crtc_vblank_int[2] ||
+ rdev->irq.pflip[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[3]) {
+ if (rdev->irq.crtc_vblank_int[3] ||
+ rdev->irq.pflip[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[4]) {
+ if (rdev->irq.crtc_vblank_int[4] ||
+ rdev->irq.pflip[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[5]) {
+ if (rdev->irq.crtc_vblank_int[5] ||
+ rdev->irq.pflip[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2130,10 +2416,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
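+ /* pflip completion is handled from the vblank interrupt; the GRPH pflip sources stay masked here */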
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2440,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2,
- u32 *disp_int_cont3,
- u32 *disp_int_cont4,
- u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
- *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
- *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
- *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
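+ /* ack any latched pflip interrupts */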
+ if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
- if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
- if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
- if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2534,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
void evergreen_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
@@ -2273,8 +2577,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
u32 ring_index;
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
unsigned long flags;
bool queue_hotplug = false;
@@ -2295,8 +2597,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -2309,17 +2610,21 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -2331,17 +2636,21 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
- if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -2353,17 +2662,21 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[2])
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
- if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
@@ -2375,17 +2688,21 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[3])
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
- if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
@@ -2397,17 +2714,21 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[4])
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
- if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
@@ -2419,17 +2740,21 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[5])
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
- if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
@@ -2441,43 +2766,43 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int_cont & DC_HPD2_INTERRUPT) {
- disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
- if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
- if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
- disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
- if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
- disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
- if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
- disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -2516,7 +2841,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -2527,12 +2852,31 @@ static int evergreen_startup(struct radeon_device *rdev)
{
int r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
+ /* enable pcie gen2 link */
+ if (!ASIC_IS_DCE5(rdev))
+ evergreen_pcie_gen2_enable(rdev);
+
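+ /* DCE5 parts need the MC microcode in addition to PFP/ME/RLC */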
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = btc_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
}
evergreen_mc_program(rdev);
@@ -2551,6 +2895,11 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
+ /* XXX: ontario has problems blitting to gart at the moment */
+ if (rdev->family == CHIP_PALM) {
+ rdev->asic->copy = NULL;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -2658,12 +3007,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev)
u32 reg;
/* first check CRTCs */
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->flags & RADEON_IS_IGP)
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ else
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
@@ -2800,3 +3153,52 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, speed_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
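+ /* the other end is gen2 capable: re-enable upconfigure and arm the gen2 strap */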
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e590110dd..b758dc7f2f2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -147,7 +147,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, 48, gpu_addr);
else
@@ -331,9 +333,95 @@ set_default_state(struct radeon_device *rdev)
num_hs_stack_entries = 85;
num_ls_stack_entries = 85;
break;
+ case CHIP_PALM:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_BARTS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_TURKS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_CAICOS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 10;
+ num_gs_threads = 10;
+ num_es_threads = 10;
+ num_hs_threads = 10;
+ num_ls_threads = 10;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
}
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
sq_config = 0;
else
sq_config = VC_ENABLE;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36fd..c781c92c345 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
+#define EVERGREEN_GRPH_UPDATE 0x6844
+# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
+# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
@@ -178,6 +183,7 @@
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c4435..36d32d83d86 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -164,11 +164,13 @@
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
-
+/* evergreen */
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x7FF0000
#define ASIC_T_SHIFT 16
+/* APU */
+#define CG_THERMAL_STATUS 0x678
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
@@ -181,6 +183,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -200,6 +203,7 @@
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_FB_LOCATION 0x2024
+#define MC_FUS_VM_FB_OFFSET 0x2898
#define MC_VM_MB_L1_TLB0_CNTL 0x2234
#define MC_VM_MB_L1_TLB1_CNTL 0x2238
#define MC_VM_MB_L1_TLB2_CNTL 0x223C
@@ -349,6 +353,9 @@
#define SYNC_WALKER (1 << 25)
#define SYNC_ALIGNER (1 << 26)
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
@@ -574,6 +581,44 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 8
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
@@ -603,7 +648,7 @@
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
@@ -644,14 +689,14 @@
# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
-# define PACKET3_CB11_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
# define PACKET3_FULL_CACHE_ENA (1 << 20)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
-# define PACKET3_SMX_ACTION_ENA (1 << 28)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 00000000000..5e0bef80ad7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+
+#define BTC_IO_MC_REGS_SIZE 29
+
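+/* per-ASIC MC IO debug register settings, programmed before the MC ucode is loaded */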
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00916a00}
+};
+
+int btc_mc_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ u32 mem_type, running, blackout = 0;
+ u32 *io_mc_regs;
+ int i;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ io_mc_regs = (u32 *)&barts_io_mc_regs;
+ break;
+ case CHIP_TURKS:
+ io_mc_regs = (u32 *)&turks_io_mc_regs;
+ break;
+ case CHIP_CAICOS:
+ default:
+ io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ break;
+ }
+
+ mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+ if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
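+ /* black out MC clients while the sequencer is reprogrammed */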
+ if (running) {
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+ }
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
+ /* load the MC ucode */
+ fw_data = (const __be32 *)rdev->mc_fw->data;
+ for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+ udelay(10);
+
+ if (running)
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+ }
+
+ return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
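+ /* request_firmware() needs a struct device; use a transient platform device */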
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ chip_name = "BARTS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_TURKS:
+ chip_name = "TURKS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_CAICOS:
+ chip_name = "CAICOS";
+ rlc_chip_name = "BTC";
+ break;
+ default: BUG();
+ }
+
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mc_fw->size != mc_req_size) {
+ printk(KERN_ERR
+ "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "ni_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ release_firmware(rdev->mc_fw);
+ rdev->mc_fw = NULL;
+ }
+ return err;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 00000000000..5db7b7d6feb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL 0x6840
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x68b4
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x68c4
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL 0x68d4
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x68f0
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x6960
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x6964
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x6a80
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 00000000000..f7b445390e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_SEQ_SUP_CNTL 0x28c8
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_IO_PAD_CNTL_D0 0x29d0
+#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
+#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f74b..f637595b14e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+ tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+ /* make sure pending bit is asserted */
+ tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen as late as possible in the vblank interval.
+ * same field for crtc1/2
+ */
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+ /* Lock the graphics update lock */
+ /* update the scanout addresses */
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -622,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev)
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c678d..eab91760fae 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
#define C_000360_CUR2_LOCK 0x7FFFFFFF
-#define R_0003C2_GENMO_WT 0x0003C0
+#define R_0003C2_GENMO_WT 0x0003C2
#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d3480d9..fae5e709f27 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -558,10 +558,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev)
/* FIXME wait for idle */
- if (rdev->family < CHIP_R600)
- link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
- else
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -745,6 +742,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4E00:
/* RB3D_CCTL */
+ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+ p->rdev->cmask_filp != p->filp) {
+ DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ return -EINVAL;
+ }
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
break;
case 0x4E38:
@@ -787,6 +789,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 15:
track->cb[i].cpp = 2;
break;
+ case 5:
+ if (p->rdev->family < CHIP_RV515) {
+ DRM_ERROR("Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
+ return -EINVAL;
+ }
+ /* fall through */
case 6:
track->cb[i].cpp = 4;
break;
@@ -1199,6 +1208,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
if (p->rdev->hyperz_filp != p->filp)
return -EINVAL;
break;
+ case PACKET3_3D_CLEAR_CMASK:
+ if (p->rdev->cmask_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c60d9d..1f519a5ffb8 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_3D_CLEAR_HIZ 0x37
+#define PACKET3_3D_CLEAR_CMASK 0x38
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29..fc437059918 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7c896..6b50716267c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -91,6 +94,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
@@ -1164,7 +1168,7 @@ static void r600_mc_program(struct radeon_device *rdev)
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platform.
*/
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
@@ -2009,6 +2013,10 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
break;
+ case CHIP_PALM:
+ chip_name = "PALM";
+ rlc_chip_name = "SUMO";
+ break;
default: BUG();
}
@@ -2372,6 +2380,9 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -2874,6 +2885,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3011,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
+ u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3048,13 @@ int r600_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3081,6 +3097,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3121,35 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void r600_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
if (ASIC_IS_DCE3(rdev)) {
- *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
} else {
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = 0;
- }
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ }
+ rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+ if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3160,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
}
- if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3171,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3182,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (ASIC_IS_DCE32(rdev)) {
- if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3215,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
void r600_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
r600_disable_interrupt_state(rdev);
}
@@ -3262,7 +3281,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
@@ -3283,7 +3302,7 @@ int r600_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -3296,17 +3315,21 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
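+ /* flips on r600 complete from the vblank handler; enabling pflip
+ * just keeps the vblank interrupt on (see r600_irq_set above) */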
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -3318,17 +3341,21 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
- if (disp_int & LB_D2_VLINE_INTERRUPT) {
- disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -3340,43 +3367,43 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int & DC_HPD2_INTERRUPT) {
- disp_int &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 4:
- if (disp_int_cont & DC_HPD3_INTERRUPT) {
- disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 5:
- if (disp_int_cont & DC_HPD4_INTERRUPT) {
- disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 10:
- if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
- if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -3419,7 +3446,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3508,3 +3535,219 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ u32 link_width_cntl, mask, target_reg;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* FIXME wait for idle */
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ default:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ }
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RENEGOTIATE_EN |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= mask;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ /* some northbridges can renegotiate the link rather than requiring
+ * a complete re-config.
+ * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+ */
+ if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+ link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+ else
+ link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ if (rdev->family >= CHIP_RV770)
+ target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+ else
+ target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+ /* wait for the lane reconfiguration to complete; the register reads
+ * back all ones until the change has taken effect */
+ link_width_cntl = RREG32(target_reg);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32(target_reg);
+
+}
+
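+/* report the negotiated PCIE link width in lanes, decoded from LC_LINK_WIDTH_CNTL */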
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
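+/* attempt to bring the PCIE link up to gen2 (5.0 GT/s) speed; only done when
+ * both sides of the link advertise gen2 support, and skipped on IGPs, X2
+ * boards, and anything older than rv6xx */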
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+ u16 link_cntl2;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* only RV6xx+ chips are supported */
+ if (rdev->family <= CHIP_R600)
+ return;
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ WREG32(MM_CFGREGS_CNTL, 0x8);
+ link_cntl2 = RREG32(0x4088);
+ WREG32(MM_CFGREGS_CNTL, 0);
+ /* selectable de-emphasis is not supported yet; leave the link at gen1 */
+ if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+ return;
+ }
+
+ speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+ speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+ speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+ speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
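+ /* write PCIE config space LINK_CNTL2 (offset 0x88, see r600d.h) through
+ * the MM_CFGREGS window to request a 5.0 GT/s target link speed; the
+ * 0x541c poke beforehand appears to be an undocumented part of the
+ * sequence */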
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl &= ~LC_POINT_7_PLUS_EN;
+ WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ } else {
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
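+ /* the vendor check is not implemented yet, so upconfigure is
+ * unconditionally disabled for now */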
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4f410..a5d898b4bad 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,54 @@
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
+#define D1GRPH_INTERRUPT_STATUS 0x6158
+#define D2GRPH_INTERRUPT_STATUS 0x6958
+# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL 0x615c
+#define D2GRPH_INTERRUPT_CONTROL 0x695c
+# define DxGRPH_PFLIP_INT_MASK (1 << 0)
+# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
+
+/* PCIE link control registers */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+# define LC_POINT_7_PLUS_EN (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d4..e9486630a46 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -180,6 +181,7 @@ void rs690_pm_info(struct radeon_device *rdev);
extern u32 rv6xx_get_temp(struct radeon_device *rdev);
extern u32 rv770_get_temp(struct radeon_device *rdev);
extern u32 evergreen_get_temp(struct radeon_device *rdev);
+extern u32 sumo_get_temp(struct radeon_device *rdev);
/*
* Fences.
@@ -259,13 +261,12 @@ struct radeon_bo {
};
struct radeon_bo_list {
- struct list_head list;
+ struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
- bool reserved;
};
/*
@@ -377,11 +378,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* IRQS.
*/
+
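+/* carries the state of an in-flight page flip from the ioctl to the vblank
+ * handler that completes it: the fence to wait on, the event to deliver,
+ * the old buffer to unpin, and the new scanout base address */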
+struct radeon_unpin_work {
+ struct work_struct work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct radeon_fence *fence;
+ struct drm_pending_vblank_event *event;
+ struct radeon_bo *old_rbo;
+ u64 new_crtc_base;
+};
+
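+/* per-family copies of the display interrupt status registers; *_irq_ack()
+ * latches the hardware values here and *_irq_process() consumes and clears
+ * the bits as events are handled */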
+struct r500_irq_stat_regs {
+ u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 d1grph_int;
+ u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 disp_int_cont3;
+ u32 disp_int_cont4;
+ u32 disp_int_cont5;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+ struct r500_irq_stat_regs r500;
+ struct r600_irq_stat_regs r600;
+ struct evergreen_irq_stat_regs evergreen;
+};
+
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
+ bool pflip[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
@@ -392,12 +438,17 @@ struct radeon_irq {
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
+ union radeon_irq_stat_regs stat_regs;
+ spinlock_t pflip_lock[6];
+ int pflip_refcount[6];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
* CP & ring.
@@ -687,6 +738,8 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
};
struct radeon_voltage {
@@ -770,6 +823,9 @@ struct radeon_pm {
u32 current_sclk;
u32 current_mclk;
u32 current_vddc;
+ u32 default_sclk;
+ u32 default_mclk;
+ u32 default_vddc;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
@@ -881,6 +937,10 @@ struct radeon_asic {
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+ /* pageflipping */
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
/*
@@ -975,6 +1035,7 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1091,11 +1152,11 @@ struct radeon_device {
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
+ const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
- struct workqueue_struct *wq;
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@ struct radeon_device {
uint8_t audio_status_bits;
uint8_t audio_category_code;
- bool powered_down;
struct notifier_block acpi_nb;
- /* only one userspace can use Hyperz features at a time */
+ /* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
+ struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
@@ -1188,6 +1249,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1261,6 +1324,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+ (rdev->ddev->pdev->device == 0x9443) || \
+ (rdev->ddev->pdev->device == 0x944B) || \
+ (rdev->ddev->pdev->device == 0x9506) || \
+ (rdev->ddev->pdev->device == 0x9509) || \
+ (rdev->ddev->pdev->device == 0x950F) || \
+ (rdev->ddev->pdev->device == 0x689C) || \
+ (rdev->ddev->pdev->device == 0x689D))
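+/* the device IDs above are the dual-GPU X2/Gemini boards, which need a
+ * special sequence for PCIE link changes and are skipped here */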
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -1269,6 +1340,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+ (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
/*
* BIOS helpers.
@@ -1344,6 +1418,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
/* Common functions */
/* AGP */
@@ -1372,67 +1449,7 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-
-/* rv200,rv250,rv280 */
-extern void r200_set_safe_registers(struct radeon_device *rdev);
-
-/* r300,r350,rv350,rv370,rv380 */
-extern void r300_set_reg_safe(struct radeon_device *rdev);
-extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_mc_init(struct radeon_device *rdev);
-extern void r300_clock_startup(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
-extern int rv370_pcie_gart_init(struct radeon_device *rdev);
-extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
-extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
-extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* r420,r423,rv410 */
-extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
-extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-extern void r420_pipes_init(struct radeon_device *rdev);
-
-/* rv515 */
-struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
-};
-extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-extern void rv515_vga_render_disable(struct radeon_device *rdev);
-extern void rv515_set_safe_registers(struct radeon_device *rdev);
-extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_clock_startup(struct radeon_device *rdev);
-extern void rv515_debugfs(struct radeon_device *rdev);
-extern int rv515_suspend(struct radeon_device *rdev);
-
-/* rs400 */
-extern int rs400_gart_init(struct radeon_device *rdev);
-extern int rs400_gart_enable(struct radeon_device *rdev);
-extern void rs400_gart_adjust_size(struct radeon_device *rdev);
-extern void rs400_gart_disable(struct radeon_device *rdev);
-extern void rs400_gart_fini(struct radeon_device *rdev);
-
-/* rs600 */
-extern void rs600_set_safe_registers(struct radeon_device *rdev);
-extern int rs600_irq_set(struct radeon_device *rdev);
-extern void rs600_irq_disable(struct radeon_device *rdev);
-
-/* rs690, rs740 */
-extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
- struct drm_display_mode *mode1,
- struct drm_display_mode *mode2);
-
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@ extern int evergreen_irq_set(struct radeon_device *rdev);
extern int evergreen_blit_init(struct radeon_device *rdev);
extern void evergreen_blit_fini(struct radeon_device *rdev);
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int btc_mc_load_microcode(struct radeon_device *rdev);
+
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89ecbf7..3a1b1618622 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@ static struct radeon_asic r600_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@ static struct radeon_asic rv770_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic evergreen_asic = {
@@ -733,6 +772,95 @@ static struct radeon_asic evergreen_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
+static struct radeon_asic sumo_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic btc_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
@@ -749,6 +877,9 @@ static struct radeon_asic evergreen_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_HEMLOCK:
rdev->asic = &evergreen_asic;
break;
+ case CHIP_PALM:
+ rdev->asic = &sumo_asic;
+ break;
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ rdev->asic = &btc_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -849,7 +988,9 @@ int radeon_asic_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE41(rdev))
+ rdev->num_crtc = 2;
+ else if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 74098824414..e01f0771853 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+ struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
/*
* r200,rv250,rs300,rv280
*/
extern int r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
struct radeon_fence *fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
/*
* r300,r350,rv350,rv380
@@ -159,6 +168,15 @@ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -168,6 +186,10 @@ extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -180,6 +202,12 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+
/*
* rs600.
@@ -191,6 +219,7 @@ extern int rs600_suspend(struct radeon_device *rdev);
extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+
/*
* rs690,rs740
@@ -216,10 +250,21 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode1,
+ struct drm_display_mode *mode2);
/*
* rv515
*/
+struct rv515_mc_save {
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 d1crtc_control;
+ u32 d2crtc_control;
+};
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+
/*
* r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@ extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
extern void rv770_pm_misc(struct radeon_device *rdev);
+extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
/*
* evergreen
@@ -314,5 +370,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3382d..1573202a641 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
- uint32_t supported_device);
+ uint32_t supported_device, u16 caps);
/* from radeon_connector.c */
extern void
@@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
- struct radeon_device *rdev = dev->dev_private;
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,17 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*line_mux = 0x90;
}
+ /* mac rv630 */
+ if ((dev->pdev->device == 0x9588) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x00a6)) {
+ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+ }
+ }
+
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +435,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+ /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+ * on the laptop and a DVI port on the docking station and
+ * both share the same encoder, hpd pin, and ddc line.
+ * So while the bios table is technically correct,
+ * we drop the DVI port here since xrandr has no concept of
+ * encoders and will try to drive both connectors
+ * with different crtcs, which isn't possible on the hardware
+ * side and leaves no crtcs for LVDS or VGA.
+ */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
- struct radeon_gpio_rec gpio;
-
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 6);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ /* actually it's a DVI-D port not DVI-I */
*connector_type = DRM_MODE_CONNECTOR_DVID;
- } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
- (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 7);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ return false;
}
}
@@ -525,6 +537,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_ENCODER_OBJECT_TABLE *enc_obj;
ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +562,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usEncoderObjectTableOffset));
router_obj = (ATOM_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +670,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
-
- radeon_add_atom_encoder(dev,
- encoder_obj,
- le16_to_cpu
- (path->
- usDeviceTag));
+ for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+ u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+ ATOM_ENCODER_CAP_RECORD *cap_record;
+ u16 caps = 0;
+ while (record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_ENCODER_CAP_RECORD_TYPE:
+ cap_record = (ATOM_ENCODER_CAP_RECORD *)record;
+ caps = le16_to_cpu(cap_record->usEncoderCap);
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ radeon_add_atom_encoder(dev,
+ encoder_obj,
+ le16_to_cpu
+ (path->
+ usDeviceTag),
+ caps);
+ }
+ }
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
@@ -995,7 +1032,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_get_encoder_enum(dev,
(1 << i),
dac),
- (1 << i));
+ (1 << i),
+ 0);
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
@@ -1074,6 +1112,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
ATOM_FIRMWARE_INFO_V2_1 info_21;
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1148,8 +1187,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
*p2pll = *p1pll;
/* system clock */
- spll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+ else
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
spll->reference_div = 0;
spll->pll_out_min =
@@ -1171,8 +1214,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
/* memory clock */
- mpll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+ else
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
mpll->reference_div = 0;
mpll->pll_out_min =
@@ -1201,8 +1248,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
if (ASIC_IS_DCE4(rdev)) {
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
- if (rdev->clock.default_dispclk == 0)
- rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ if (rdev->clock.default_dispclk == 0) {
+ if (ASIC_IS_DCE5(rdev))
+ rdev->clock.default_dispclk = 54000; /* 540 MHz */
+ else
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
}
@@ -1337,6 +1388,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
return false;
}
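+
+/* on IGP asics the IntegratedSystemInfo table can override the spread
+ * spectrum percentage and rate for TMDS, HDMI, and LVDS outputs */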
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+ u8 frev, crev;
+ u16 percentage = 0, rate = 0;
+
+ /* get any igp specific overrides */
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+ break;
+ }
+ if (percentage)
+ ss->percentage = percentage;
+ if (rate)
+ ss->rate = rate;
+ }
+}
+
union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO info;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1401,6 +1489,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ if (rdev->flags & RADEON_IS_IGP)
+ radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
}
@@ -1477,6 +1567,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+ lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+ lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1489,6 +1582,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
else
lvds->linkb = false;
+ /* parse the lcd record table */
+ if (lvds_info->info.usModePatchTableOffset) {
+ ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+ ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+ bool bad_record = false;
+ u8 *record = (u8 *)(mode_info->atom_context->bios +
+ data_offset +
+ lvds_info->info.usModePatchTableOffset);
+ while (*record != ATOM_RECORD_END_TYPE) {
+ switch (*record) {
+ case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+ record += sizeof(ATOM_PATCH_RECORD_MODE);
+ break;
+ case LCD_RTS_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_RTS_RECORD);
+ break;
+ case LCD_CAP_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+ break;
+ case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+ int edid_size =
+ max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+ edid = kmalloc(edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+ fake_edid_record->ucFakeEDIDLength);
+
+ if (drm_edid_is_valid(edid))
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ else
+ kfree(edid);
+ }
+ }
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+ lvds->native_mode.width_mm = panel_res_record->usHSize;
+ lvds->native_mode.height_mm = panel_res_record->usVSize;
+ record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+ break;
+ default:
+ DRM_ERROR("Bad LCD record %d\n", *record);
+ bad_record = true;
+ break;
+ }
+ if (bad_record)
+ break;
+ }
+ }
}
return lvds;
}
@@ -1740,496 +1886,614 @@ static const char *pp_lib_thermal_controller_names[] = {
"RV6xx",
"RV770",
"adt7473",
+ "NONE",
"External GPIO",
"Evergreen",
- "adt7473 with internal",
-
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
};
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
-void radeon_atombios_get_power_modes(struct radeon_device *rdev)
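+/*
+ * The PPLib clock info and power state layouts differ per ASIC
+ * generation; these unions let one parser address them uniformly.
+ */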
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
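+/*
+ * Map the v1-v3 PowerPlay misc flag bits onto a power state type; the
+ * later tests intentionally override the earlier ones.
+ */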
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+ int state_index,
+ u32 misc, u32 misc2)
+{
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+}
+
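+/*
+ * Parse the legacy (table revision 1-3) PowerPlay formats; returns the
+ * number of power states filled in.
+ */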
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
+ u32 misc, misc2 = 0;
+ int num_modes = 0, i;
+ int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+ union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
- u32 misc, misc2 = 0, sclk, mclk;
- union power_info *power_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- struct _ATOM_PPLIB_STATE *power_state;
- int num_modes = 0, i, j;
- int state_index = 0, mode_index = 0;
- struct radeon_i2c_bus_rec i2c_bus;
-
- rdev->pm.default_power_state_index = -1;
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- if (frev < 4) {
- /* add the i2c bus for thermal/fan chip */
- if (power_info->info.ucOverdriveThermalController > 0) {
- DRM_INFO("Possible %s thermal controller at 0x%02x\n",
- thermal_controller_names[power_info->info.ucOverdriveThermalController],
- power_info->info.ucOverdriveControllerAddress >> 1);
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = thermal_controller_names[power_info->info.
- ucOverdriveThermalController];
- info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* add the i2c bus for thermal/fan chip */
+ if (power_info->info.ucOverdriveThermalController > 0) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- num_modes = power_info->info.ucNumOfPowerModeEntries;
- if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
- num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
- /* last mode is usually default, array is low to high */
- for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- switch (frev) {
- case 1:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 2:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 3:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
- if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
- true;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
- power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
- }
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- /* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[state_index - 1].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index - 1;
- rdev->pm.power_state[state_index - 1].default_clock_mode =
- &rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ }
+ }
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ return state_index;
+}
+
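+/*
+ * Register the thermal controller described by the PPLib table:
+ * internal sensors set rdev->pm.int_thermal_type, external chips get
+ * an i2c device instantiated on the matching bus.
+ */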
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+ ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
- int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
- uint8_t fw_frev, fw_crev;
- uint16_t fw_data_offset, vddc = 0;
- union firmware_info *firmware_info;
- ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
-
- if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
- &fw_frev, &fw_crev, &fw_data_offset)) {
- firmware_info =
- (union firmware_info *)(mode_info->atom_context->bios +
- fw_data_offset);
- vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
+ }
+ }
+}
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
- } else {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
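+/*
+ * Fetch the boot-up VDDC voltage from the FirmwareInfo table; returns
+ * 0 if the table cannot be parsed.
+ */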
+static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+ u16 vddc = 0;
- }
- }
- /* first mode is usually default, followed by low to high */
- for (i = 0; i < power_info->info_4.ucNumStates; i++) {
- mode_index = 0;
- power_state = (struct _ATOM_PPLIB_STATE *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usStateArrayOffset) +
- i * power_info->info_4.ucStateEntrySize);
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
- (power_state->ucNonClockStateIndex *
- power_info->info_4.ucNonClockSize));
- for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
- if (rdev->flags & RADEON_IS_IGP) {
- struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
- sclk |= clock_info->ucLowEngineClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
- continue;
- /* voltage works differently on IGPs */
- mode_index++;
- } else if (ASIC_IS_DCE4(rdev)) {
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- /* XXX usVDDCI */
- mode_index++;
- } else {
- struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- mode_index++;
- }
- }
- rdev->pm.power_state[state_index].num_clock_modes = mode_index;
- if (mode_index) {
- misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- rdev->pm.power_state[state_index].pcie_lanes =
- ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
- ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
- switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- }
- rdev->pm.power_state[state_index].flags = 0;
- if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- rdev->pm.power_state[state_index].flags |=
- RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- /* patch the table values with the default slck/mclk from firmware info */
- for (j = 0; j < mode_index; j++) {
- rdev->pm.power_state[state_index].clock_info[j].mclk =
- rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[j].sclk =
- rdev->clock.default_sclk;
- if (vddc)
- rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
- vddc;
- }
- }
- state_index++;
- }
- }
- /* if multiple clock modes, mark the lowest as no display */
- for (i = 0; i < state_index; i++) {
- if (rdev->pm.power_state[i].num_clock_modes > 1)
- rdev->pm.power_state[i].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- /* first mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[0].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = 0;
- rdev->pm.power_state[0].default_clock_mode =
- &rdev->pm.power_state[0].clock_info[0];
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ }
+
+ return vddc;
+}
+
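+/*
+ * Fill in the non-clock state attributes (classification, pcie lanes,
+ * flags); the boot state becomes the default power state, and on DCE5
+ * its clocks/voltage are recorded as the chip defaults.
+ */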
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+ int j;
+ u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+ u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ if (ASIC_IS_DCE5(rdev)) {
+ /* NI chips post without MC ucode, so default clocks are strobe mode only */
+ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+ rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+ rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ } else {
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
}
}
+ }
+}
+
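+/*
+ * Decode a single clock info entry for the appropriate ASIC family;
+ * returns false for invalid (zero clock) modes so callers can skip them.
+ */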
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ union pplib_clock_info *clock_info)
+{
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family >= CHIP_PALM) {
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ } else {
+ sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+ sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+ mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->evergreen.usVDDC);
+ } else {
+ sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+ sclk |= clock_info->r600.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+ mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->r600.usVDDC);
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ return false;
+ } else {
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ return false;
+ }
+ return true;
+}
+
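+/*
+ * Parse the PPLib v4/v5 PowerPlay tables, which index their state,
+ * clock and non-clock arrays by fixed entry sizes.
+ */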
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ /* first mode is usually default, followed by low to high */
+ for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+ i * power_info->pplib.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+ (power_state->v1.ucNonClockStateIndex *
+ power_info->pplib.ucNonClockSize));
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
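+/*
+ * Parse the PPLib v6 PowerPlay table, which replaces the fixed-stride
+ * arrays with explicit State/ClockInfo/NonClockInfo array structures.
+ */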
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, non_clock_array_index, clock_array_index;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ struct StateArray *state_array;
+ struct ClockInfoArray *clock_info_array;
+ struct NonClockInfoArray *non_clock_info_array;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ state_array = (struct StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usStateArrayOffset);
+ clock_info_array = (struct ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usClockInfoArrayOffset);
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usNonClockInfoArrayOffset);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)&state_array->states[i];
+ /* XXX this might be an inagua bug... */
+ non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
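+/* Top-level PowerPlay parser: dispatch on the table format revision. */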
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int state_index = 0;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ switch (frev) {
+ case 1:
+ case 2:
+ case 3:
+ state_index = radeon_atombios_parse_power_table_1_3(rdev);
+ break;
+ case 4:
+ case 5:
+ state_index = radeon_atombios_parse_power_table_4_5(rdev);
+ break;
+ case 6:
+ state_index = radeon_atombios_parse_power_table_6(rdev);
+ break;
+ default:
+ break;
+ }
} else {
/* add the default mode */
rdev->pm.power_state[state_index].type =
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b50dcf..1aba85cad1a 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return true;
}
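+/*
+ * Read the BIOS image on NI boards where it is normally disabled:
+ * temporarily enable the ROM and turn off VGA mode, read the image,
+ * then restore the saved register state.
+ */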
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -416,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP)
return igp_read_bios_from_vram(rdev);
+ else if (rdev->family >= CHIP_BARTS)
+ return ni_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_RV770)
return r700_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b8075f6e..591fcae8f22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
return true;
}
+/* this is used for atom LCDs as well */
struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
if (rdev->mode_info.bios_hardcoded_edid)
return rdev->mode_info.bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a7459..22b7e3dc0ec 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
+ /* add the width/height from vbios tables if available */
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
@@ -1216,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1256,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1299,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a2705f1..35b5eb8fbe2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
+ p->relocs[i].lobj.rdomain = r->read_domains;
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
- INIT_LIST_HEAD(&p->relocs[i].lobj.list);
radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ &p->validated);
}
}
return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error && parser->ib) {
- radeon_bo_list_fence(&parser->validated, parser->ib->fence);
- }
- radeon_bo_list_unreserve(&parser->validated);
+
+ if (!error && parser->ib)
+ ttm_eu_fence_buffer_objects(&parser->validated,
+ parser->ib->fence);
+ else
+ ttm_eu_backoff_reservation(&parser->validated);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a13f4..26091d602b8 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@ static const char radeon_family_name[][16] = {
"JUNIPER",
"CYPRESS",
"HEMLOCK",
+ "PALM",
+ "BARTS",
+ "TURKS",
+ "CAICOS",
"LAST",
};
@@ -224,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true;
}
}
+ /* always use writeback/events on NI */
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->wb.enabled = true;
+ rdev->wb.use_event = true;
+ }
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
@@ -335,7 +344,12 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE41(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@ void radeon_check_arguments(struct radeon_device *rdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
- rdev->powered_down = false;
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_resume_kms(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_suspend_kms(dev, pmm);
- /* don't suspend or resume card normally */
- rdev->powered_down = true;
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -704,11 +718,6 @@ int radeon_device_init(struct radeon_device *rdev,
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- /* setup workqueue */
- rdev->wq = create_workqueue("radeon");
- if (rdev->wq == NULL)
- return -ENOMEM;
-
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
@@ -773,6 +782,7 @@ int radeon_device_init(struct radeon_device *rdev,
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
vga_switcheroo_register_client(rdev->pdev,
radeon_switcheroo_set_state,
+ NULL,
radeon_switcheroo_can_switch);
r = radeon_init(rdev);
@@ -806,7 +816,6 @@ void radeon_device_fini(struct radeon_device *rdev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
- destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
@@ -835,7 +844,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* turn off display hw */
@@ -893,7 +902,7 @@ int radeon_resume_kms(struct drm_device *dev)
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
acquire_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6c063..d26dabf878d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
-static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
}
}
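+/*
+ * DCE5 adds programmable CSC, prescale and de/regamma blocks around the
+ * legacy LUT: route the input gamma through the 256-entry 10:10:10 LUT
+ * and leave the remaining blocks in bypass.
+ */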
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
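+ /* one 30-bit word per LUT entry: R in bits 29:20, G in 19:10, B in 9:0 */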
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+}
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_DCE4(rdev))
- evergreen_crtc_load_lut(crtc);
+ if (ASIC_IS_DCE5(rdev))
+ dce5_crtc_load_lut(crtc);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_crtc_load_lut(crtc);
else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
@@ -183,12 +245,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
kfree(radeon_crtc);
}
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+ struct radeon_unpin_work *work =
+ container_of(__work, struct radeon_unpin_work, work);
+ int r;
+
+ /* unpin the old buffer */
+ r = radeon_bo_reserve(work->old_rbo, false);
+ if (likely(r == 0)) {
+ r = radeon_bo_unpin(work->old_rbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to unpin buffer after flip\n");
+ }
+ radeon_bo_unreserve(work->old_rbo);
+ } else
+ DRM_ERROR("failed to reserve buffer after flip\n");
+ kfree(work);
+}
+
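+/* Called from the vblank irq path: issue the pending flip via mmio, or complete one that already latched. */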
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_unpin_work *work;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ u32 update_pending;
+ int vpos, hpos;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+ !radeon_fence_signaled(work->fence)) {
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+ /* New pageflip, or just completion of a previous one? */
+ if (!radeon_crtc->deferred_flip_completion) {
+ /* do the flip (mmio) */
+ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+ } else {
+ /* This is just the completion of a flip queued in the
+ * crtc on the last invocation, so go directly to the
+ * completion routine.
+ */
+ update_pending = 0;
+ radeon_crtc->deferred_flip_completion = 0;
+ }
+
+ /* Has the pageflip already completed in crtc, or is it certain
+ * to complete in this vblank?
+ */
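+ /* A vpos below 99% of vdisplay means scanout is active again, i.e. the flip did not latch in this vblank. */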
+ if (update_pending &&
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ &vpos, &hpos)) &&
+ (vpos >= 0) &&
+ (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+ /* crtc didn't flip in this target vblank interval,
+ * but the flip is still pending in the crtc. It will
+ * complete in the next vblank interval, so finish the
+ * flip at the next vblank irq.
+ */
+ radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* The pageflip has completed, or is certain to complete, in this vblank. Clean up. */
+ radeon_crtc->unpin_work = NULL;
+
+ /* wakeup userspace */
+ if (work->event) {
+ e = work->event;
+ e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+ drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ radeon_fence_unref(&work->fence);
+ radeon_post_page_flip(work->rdev, work->crtc_id);
+ schedule_work(&work->work);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ struct radeon_fence *fence;
+ struct radeon_unpin_work *work;
+ unsigned long flags;
+ u32 tiling_flags, pitch_pixels;
+ u64 base;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ r = radeon_fence_create(rdev, &fence);
+ if (unlikely(r != 0)) {
+ kfree(work);
+ DRM_ERROR("flip queue: failed to create fence.\n");
+ return -ENOMEM;
+ }
+ work->event = event;
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->fence = radeon_fence_ref(fence);
+ old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ /* schedule unpin of the old buffer */
+ obj = old_radeon_fb->obj;
+ rbo = obj->driver_private;
+ work->old_rbo = rbo;
+ INIT_WORK(&work->work, radeon_unpin_work_func);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (radeon_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ radeon_fence_unref(&fence);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ radeon_crtc->unpin_work = work;
+ radeon_crtc->deferred_flip_completion = 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* pin the new buffer */
+ obj = new_radeon_fb->obj;
+ rbo = obj->driver_private;
+
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+ work->old_rbo, rbo);
+
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ /* crtc offset is from display base addr not FB location */
+ base -= radeon_crtc->legacy_display_base_addr;
+ pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev)) {
+ base &= ~0x7ff;
+ } else {
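+ /* legacy macrotiles are 2KB: 256 bytes wide by 8 lines tall */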
+ int byteshift = fb->bits_per_pixel >> 4;
+ int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+ }
+ } else {
+ int offset = crtc->y * pitch_pixels + crtc->x;
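+ /* scale the pixel offset to a byte offset for the fb depth */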
+ switch (fb->bits_per_pixel) {
+ case 8:
+ default:
+ offset *= 1;
+ break;
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ }
+ base += offset;
+ }
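+ /* crtc base addresses must be 8-byte aligned */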
+ base &= ~7;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* update crtc fb */
+ crtc->fb = fb;
+
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
+
+ /* 32 ring dwords ought to cover us */
+ r = radeon_ring_lock(rdev, 32);
+ if (r) {
+ DRM_ERROR("failed to lock the ring before flip\n");
+ goto pflip_cleanup2;
+ }
+
+ /* emit the fence */
+ radeon_fence_emit(rdev, fence);
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ /* fire the ring */
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+
+pflip_cleanup2:
+ drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_unpin(rbo);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ radeon_fence_unref(&fence);
+ kfree(work);
+
+ return r;
+}
+
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
+ .page_flip = radeon_crtc_page_flip,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_legacy_init_crtc(dev, radeon_crtc);
}
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -260,6 +582,8 @@ static const char *encoder_names[34] = {
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
+ "NUTMEG",
+ "TRAVIS",
};
static const char *connector_names[15] = {
@@ -417,9 +741,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
- /* some servers provide a hardcoded edid in rom for KVMs */
- if (!radeon_connector->edid)
- radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -849,7 +1181,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->ddev->mode_config.max_width = 16384;
+ rdev->ddev->mode_config.max_height = 16384;
+ } else if (ASIC_IS_AVIVO(rdev)) {
rdev->ddev->mode_config.max_width = 8192;
rdev->ddev->mode_config.max_height = 8192;
} else {
@@ -1019,7 +1354,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
/*
* Retrieve current video scanout position of crtc on a given gpu.
*
- * \param rdev Device to query.
+ * \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1366,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \return Flags, or'ed together as follows:
*
- * RADEON_SCANOUTPOS_VALID = Query successfull.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
+ struct radeon_device *rdev = dev->dev_private;
+
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else if (ASIC_IS_AVIVO(rdev)) {
if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1449,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1459,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
}
@@ -1133,13 +1470,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) {
/* Yes: Decode. */
- ret |= RADEON_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
}
else {
/* No: Fake something reasonable which gives at least ok results. */
- vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0;
}
@@ -1155,7 +1492,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
- vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal;
}
@@ -1164,7 +1501,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* In vblank? */
if (in_vbl)
- ret |= RADEON_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_INVBL;
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f2d04..be5cb4f28c2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 7
+#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -296,6 +303,8 @@ static struct drm_driver kms_driver = {
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+ .get_scanout_position = radeon_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = radeon_debugfs_init,
.debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943df966..8fd184286c0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
- * DCE 4.0
- * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -737,6 +743,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -752,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
}
/* no dig encoder assigned */
@@ -784,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
} else {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -823,6 +849,7 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -917,10 +944,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
- if (is_dp && rdev->clock.dp_extclk)
- args.v3.acConfig.ucRefClkSource = 2; /* external src */
- else
- args.v3.acConfig.ucRefClkSource = pll_id;
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = 3; /* external src */
+ else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+ } else {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -1044,6 +1079,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
static void
@@ -1054,6 +1090,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = connector_object_id;
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
@@ -1158,6 +1227,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
+ bool is_dce5_dac = false;
+ bool is_dce5_dvo = false;
memset(&args, 0, sizeof(args));
@@ -1180,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (ASIC_IS_DCE3(rdev))
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dvo = true;
+ else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dac = true;
+ else {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
+ } else if (is_dce5_dac) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (is_dce5_dvo) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
- action = ATOM_ENABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
+ else
+ action = ATOM_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- action = ATOM_DISABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+ else
+ action = ATOM_DISABLE;
break;
}
atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1483,27 +1588,35 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+ /* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (ASIC_IS_DCE41(rdev)) {
if (dig->linkb)
return 1;
else
return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
}
}
@@ -1610,7 +1723,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
if (ext_encoder) {
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2046,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1970,6 +2092,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2152,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066dcab..1ca55eb09ad 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@ enum radeon_family {
CHIP_JUNIPER,
CHIP_CYPRESS,
CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32be5e..ca32e9c1e91 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,8 +225,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -247,8 +245,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb281dfa..171b0b2e3a6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
} else
radeon_fence_ring_emit(rdev, fence);
+ trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@ retry:
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
}
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and fence isn't
* signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f..a289646e8aa 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
unsigned i;
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@@ -101,8 +101,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
}
@@ -110,6 +112,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -121,7 +125,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
+ ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
(!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
@@ -148,6 +152,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
+ flush_work_sync(&rdev->hotplug_work);
}
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +180,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
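+/* Pageflip interrupts are refcounted per crtc: the first get enables the irq, the last put disables it. */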
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
+ rdev->irq.pflip[crtc] = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
+ rdev->irq.pflip[crtc] = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c6ebb..28a53e4a925 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@ out:
return r;
}
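+/* Arbitrate an exclusive per-file right: on entry *value requests (1) or revokes (0) it; on return it reports whether applier now holds it. */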
+static void radeon_set_filp_rights(struct drm_device *dev,
+ struct drm_file **owner,
+ struct drm_file *applier,
+ uint32_t *value)
+{
+ mutex_lock(&dev->struct_mutex);
+ if (*value == 1) {
+ /* wants rights */
+ if (!*owner)
+ *owner = applier;
+ } else if (*value == 0) {
+ /* revokes rights */
+ if (*owner == applier)
+ *owner = NULL;
+ }
+ *value = *owner == applier ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+}
/*
- * Userspace get informations ioctl
+ * Userspace get information ioctl
*/
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
@@ -173,18 +191,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
- if (value == 1) {
- /* wants hyper-z */
- if (!rdev->hyperz_filp)
- rdev->hyperz_filp = filp;
- } else if (value == 0) {
- /* revokes hyper-z */
- if (rdev->hyperz_filp == filp)
- rdev->hyperz_filp = NULL;
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+ break;
+ case RADEON_INFO_WANT_CMASK:
+ /* The same logic as Hyper-Z. */
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+ return -EINVAL;
}
- value = rdev->hyperz_filp == filp ? 1 : 0;
- mutex_unlock(&dev->struct_mutex);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +218,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev->powered_down)
- return -EINVAL;
return 0;
}
@@ -277,6 +288,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
radeon_irq_set(rdev);
}
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get associated drm_crtc: */
+ drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags,
+ drmcrtc);
+}
/*
* IOCTL.
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f9e05..12bdeab91c8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@ struct radeon_crtc {
fixed20_12 hsc;
struct drm_display_mode native_mode;
int pll_id;
+ /* page flipping */
+ struct radeon_unpin_work *unpin_work;
+ int deferred_flip_completion;
};
struct radeon_encoder_primary_dac {
@@ -376,6 +379,7 @@ struct radeon_encoder {
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
+ u16 caps;
};
struct radeon_connector_atom_dig {
@@ -442,10 +446,6 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
-/* radeon_get_crtc_scanoutpos() return flags */
-#define RADEON_SCANOUTPOS_VALID (1 << 0)
-#define RADEON_SCANOUTPOS_INVBL (1 << 1)
-#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -562,11 +562,12 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
-extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -662,4 +663,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d0049aa..7d6b8e88f74 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@ retry:
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&bo->rdev->gem.mutex);
}
+ trace_radeon_bo_create(bo);
return 0;
}
@@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head)
{
if (lobj->wdomain) {
- list_add(&lobj->list, head);
+ list_add(&lobj->tv.head, head);
} else {
- list_add_tail(&lobj->list, head);
- }
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
- int r;
-
- list_for_each_entry(lobj, head, list){
- r = radeon_bo_reserve(lobj->bo, false);
- if (unlikely(r != 0))
- return r;
- lobj->reserved = true;
- }
- return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
-
- list_for_each_entry(lobj, head, list) {
- /* only unreserve object we successfully reserved */
- if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
- radeon_bo_unreserve(lobj->bo);
+ list_add_tail(&lobj->tv.head, head);
}
}
@@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head)
u32 domain;
int r;
- list_for_each_entry(lobj, head, list) {
- lobj->reserved = false;
- }
- r = radeon_bo_list_reserve(head);
+ r = ttm_eu_reserve_buffers(head);
if (unlikely(r != 0)) {
return r;
}
- list_for_each_entry(lobj, head, list) {
+ list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head)
return 0;
}
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
-
- list_for_each_entry(lobj, head, list) {
- bo = lobj->bo;
- spin_lock(&bo->tbo.lock);
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- spin_unlock(&bo->tbo.lock);
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
- }
-}
-
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244..22d4c237dea 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
- spin_lock(&bo->tbo.lock);
+ spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.lock);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef32c6..3b1b2bf9cdd 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (radeon_gui_idle(rdev)) {
sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk;
- if (sclk > rdev->clock.default_sclk)
- sclk = rdev->clock.default_sclk;
+ if (sclk > rdev->pm.default_sclk)
+ sclk = rdev->pm.default_sclk;
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
- if (mclk > rdev->clock.default_mclk)
- mclk = rdev->clock.default_mclk;
+ if (mclk > rdev->pm.default_mclk)
+ mclk = rdev->pm.default_mclk;
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
- }
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
DRM_ERROR("invalid power method!\n");
goto fail;
@@ -447,8 +440,12 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
temp = rv770_get_temp(rdev);
break;
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
+ case THERMAL_TYPE_SUMO:
+ temp = sumo_get_temp(rdev);
+ break;
default:
temp = 0;
break;
@@ -487,6 +484,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
void radeon_pm_suspend(struct radeon_device *rdev)
{
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
- flush_wq = true;
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_sclk = rdev->clock.default_sclk;
- rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
@@ -564,6 +567,8 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.default_sclk = rdev->clock.default_sclk;
+ rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_combios_get_power_modes(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
}
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
if (ret)
return ret;
+
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->acpi_nb.notifier_call = radeon_acpi_event;
register_acpi_notifier(&rdev->acpi_nb);
#endif
- INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -609,25 +624,20 @@ int radeon_pm_init(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- /* cancel work */
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
/* reset default clocks */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -686,12 +696,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
@@ -720,9 +730,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
- if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
- !(vbl_status & RADEON_SCANOUTPOS_INVBL))
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+ !(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
@@ -796,8 +806,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
radeon_pm_set_clocks(rdev);
}
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +824,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 64928814de5..3cd4dace57c 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
#include "r500_reg.h"
#include "r600_reg.h"
#include "evergreen_reg.h"
+#include "ni_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -320,6 +321,15 @@
# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
+# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
+# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
+# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
#define RADEON_CACHE_CNTL 0x1724
#define RADEON_CACHE_LINE 0x0f0c /* PCI */
@@ -422,6 +432,7 @@
# define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0
@@ -509,6 +520,8 @@
# define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
#define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 00000000000..eafd8160a15
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+ TP_PROTO(struct radeon_bo *bo),
+ TP_ARGS(bo),
+ TP_STRUCT__entry(
+ __field(struct radeon_bo *, bo)
+ __field(u32, pages)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->pages = bo->tbo.num_pages;
+ ),
+ TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 00000000000..8175993df84
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d9200..ef422bbacfc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -304,6 +304,22 @@ rv515 0x6d40
0x4630 US_CODE_ADDR
0x4634 US_CODE_RANGE
0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
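
The sixteen US_FORMAT0_n additions above are consecutive 32-bit registers, so the offsets are simply 0x4640 + 4 * n; a one-line generator reproduces the block exactly, which is a quick way to audit whitelists like this one for gaps or typos:

#include <stdio.h>

int main(void)
{
	/* US_FORMAT0_0..US_FORMAT0_15 occupy consecutive dwords
	 * starting at 0x4640, so each entry is base + 4 * n. */
	for (unsigned int n = 0; n < 16; n++)
		printf("0x%04X US_FORMAT0_%u\n", 0x4640 + 4 * n, n);
	return 0;
}
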
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6..b4192acaab5 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
}
if (G_000044_DISPLAY_INT_STAT(irqs)) {
- *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+ rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1));
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else {
- *r500_disp_int = 0;
+ rdev->irq.stat_regs.r500.disp_int = 0;
}
if (irqs) {
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
void rs600_irq_disable(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
mdelay(1);
- rs600_irq_ack(rdev, &tmp);
+ rs600_irq_ack(rdev);
}
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status, msi_rearm;
- uint32_t r500_disp_int;
+ u32 status, msi_rearm;
bool queue_hotplug = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
- status = rs600_irq_ack(rdev, &r500_disp_int);
- if (!status && !r500_disp_int) {
+ status = rs600_irq_ack(rdev);
+ if (!status && !rdev->irq.stat_regs.r500.disp_int) {
return IRQ_NONE;
}
- while (status || r500_disp_int) {
+ while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
- if (G_000044_SW_INT(status))
+ if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev);
+ }
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
- status = rs600_irq_ack(rdev, &r500_disp_int);
+ status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
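
The new rs600 page-flip helpers use the latch-under-lock idiom: set D1GRPH_UPDATE_LOCK, write both surface address registers so the flip is programmed atomically, busy-wait until the hardware raises SURFACE_UPDATE_PENDING, then drop the lock so the flip completes at the next vblank; the pending bit is returned so the vblank path can tell when the flip has landed. The irq_set/irq_process changes keep the vblank interrupt enabled whenever a flip is pending, even with no vblank client, and route it to radeon_crtc_handle_flip(). A toy model of the register sequence (the register file, bit positions, and hardware ack are all simulated):

#include <stdint.h>
#include <stdio.h>

#define UPDATE_LOCK	(1u << 0)	/* illustrative bit positions */
#define UPDATE_PENDING	(1u << 2)

/* Toy register file standing in for the D1GRPH block. */
static uint32_t grph_update, grph_primary, grph_secondary;

static uint32_t page_flip(uint64_t crtc_base)
{
	grph_update |= UPDATE_LOCK;	/* 1. freeze scanout updates */

	/* 2. program both surface addresses while frozen */
	grph_secondary = (uint32_t)crtc_base;
	grph_primary = (uint32_t)crtc_base;

	/* 3. hardware raises the status bit; the driver busy-waits on it */
	grph_update |= UPDATE_PENDING;	/* simulated hardware ack */
	while (!(grph_update & UPDATE_PENDING))
		;

	grph_update &= ~UPDATE_LOCK;	/* 4. unfreeze: flip latches at vblank */

	/* 5. the vblank handler polls this until the flip has landed */
	return grph_update & UPDATE_PENDING;
}

int main(void)
{
	printf("pending=0x%x base=0x%x\n",
	       (unsigned)page_flip(0x1000), (unsigned)grph_primary);
	return 0;
}
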
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8cee3..3a264aa3a79 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,6 +41,41 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ if (radeon_crtc->crtc_id) {
+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ } else {
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ }
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
/* get temperature in millidegrees */
u32 rv770_get_temp(struct radeon_device *rdev)
@@ -489,6 +524,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer, mc_shared_chremap, tmp;
+ bool force_no_swizzle;
+
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ case 2:
+ case 3:
+ if (force_no_swizzle)
+ mc_shared_chremap = 0x00fac688;
+ else
+ mc_shared_chremap = 0x00bbc298;
+ break;
+ }
+
+ if (rdev->family == CHIP_RV740)
+ tcp_chan_steer = 0x00ef2a60;
+ else
+ tcp_chan_steer = 0x00fac688;
+
+ WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -688,6 +766,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ rv770_program_channel_remap(rdev);
+
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1036,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -996,7 +1115,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
@@ -1006,6 +1125,9 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1244,3 +1366,75 @@ void rv770_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, tmp;
+ u16 link_cntl2;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW |
+ LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
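
rv770_pcie_gen2_enable() only attempts the speed change when the link partner has advertised gen2 support (the LC_OTHER_SIDE_* bits), and the link-width leg is a textbook read-modify-write of a packed register field: read the current width out of the read-only RD field, clear the writable field, then write the width back together with the reconfigure/renegotiate bits. A standalone demonstration using the mask and shift values from rv770d.h below (the starting register value is invented):

#include <stdint.h>
#include <stdio.h>

#define LC_LINK_WIDTH_MASK	0x7
#define LC_LINK_WIDTH_RD_SHIFT	4
#define LC_LINK_WIDTH_RD_MASK	0x70
#define LC_RECONFIG_NOW		(1u << 8)
#define LC_RENEGOTIATE_EN	(1u << 10)
#define LC_UPCONFIGURE_SUPPORT	(1u << 12)

int main(void)
{
	/* Pretend the register reads back a current width of x8 (4). */
	uint32_t link_width_cntl = 4u << LC_LINK_WIDTH_RD_SHIFT;
	uint32_t lanes;

	/* Extract the read-back width field... */
	lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >>
		LC_LINK_WIDTH_RD_SHIFT;
	/* ...then clear the writable field and request renegotiation. */
	link_width_cntl &= ~LC_LINK_WIDTH_MASK;
	link_width_cntl |= lanes | LC_RECONFIG_NOW |
			   LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;

	printf("lanes=%u cntl=0x%08x\n", lanes, (unsigned)link_width_cntl);
	return 0;
}
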
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20e81d..abc8cf5a367 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -138,6 +138,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -303,6 +304,7 @@
#define BILINEAR_PRECISION_8_BIT (1 << 31)
#define TCP_CNTL 0x9610
+#define TCP_CHAN_STEER 0x9614
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -351,4 +353,49 @@
#define SRBM_STATUS 0x0E50
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a7854..af61fc29e84 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
int put_count = 0;
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
/**
* Deadlock avoidance for multi-bo reserving.
*/
- if (use_sequence && bo->seq_valid &&
- (sequence - bo->val_seq < (1 << 31))) {
- return -EAGAIN;
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
}
if (no_wait)
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
BUG();
}
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free)
+{
+ kref_sub(&bo->list_kref, count,
+ (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return ret;
}
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->glob;
spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ttm_bo_unreserve_locked(bo);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
int ret = 0;
if (old_is_pci || new_is_pci ||
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
- ttm_bo_unmap_virtual(bo);
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+ ret = ttm_mem_io_lock(old_man, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(old_man);
+ }
/*
* Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
}
if (bo->mem.mm_node) {
- spin_lock(&bo->lock);
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
- spin_unlock(&bo->lock);
} else
bo->offset = 0;
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
-
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int put_count;
int ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!bo->sync_obj) {
spin_lock(&glob->lru_lock);
/**
- * Lock inversion between bo::reserve and bo::lock here,
+ * Lock inversion between bo:reserve and bdev::fence_lock here,
* but that's OK, since we're only trylocking.
*/
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
if (unlikely(ret == -EBUSY))
goto queue;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return;
} else {
@@ -512,7 +529,7 @@ queue:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (sync_obj) {
driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool no_wait_reserve,
bool no_wait_gpu)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret = 0;
retry:
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0))
return ret;
@@ -580,8 +598,7 @@ retry:
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return 0;
}
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved = false;
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
BUG_ON(ret != 0);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
{
int ret = 0;
struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
BUG_ON(!atomic_read(&bo->reserved));
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved = false;
+ mem.bus.io_reserved_vm = false;
+ mem.bus.io_reserved_count = 0;
/*
* Determine where to move the buffer.
*/
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
bo->destroy = destroy;
- spin_lock_init(&bo->lock);
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
+ INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved = false;
+ bo->mem.bus.io_reserved_vm = false;
+ bo->mem.bus.io_reserved_count = 0;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
BUG_ON(man->has_type);
+ man->io_reserve_fastpath = true;
+ man->use_io_reserve_lru = false;
+ mutex_init(&man->io_reserve_mutex);
+ INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
@@ -1526,7 +1554,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
-
+ bdev->val_seq = 0;
+ spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
@@ -1560,7 +1589,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ ttm_mem_io_lock(man, false);
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(man);
}
+
+
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
void *sync_obj_arg;
int ret = 0;
@@ -1663,9 +1705,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
continue;
}
@@ -1674,29 +1716,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
sync_obj_arg = bo->sync_obj_arg;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
return ret;
}
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj &&
bo->sync_obj_arg == sync_obj_arg)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
} else {
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
}
}
return 0;
@@ -1705,6 +1747,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
/*
@@ -1714,9 +1757,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
if (unlikely(ret != 0))
return ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
/**
* Wait for GPU, then move to system cached.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
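
Two separable changes run through this ttm_bo.c diff: the per-object bo->lock protecting sync_obj is replaced by a single bdev->fence_lock, and multi-object reservation learns to distinguish "this thread already holds the reservation" (sequence == val_seq, now -EDEADLK) from "an older ticket holds it, so back off" (-EAGAIN). The age test sequence - bo->val_seq < (1 << 31) is the standard wraparound-safe ordering on unsigned 32-bit tickets; a small self-check of its behavior, including across the wrap:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* "ticket a is at or after ticket b", tolerant of u32 wraparound:
 * the same test ttm_bo_reserve_locked() applies to val_seq (the
 * equal case is filtered out first and becomes -EDEADLK). */
static int seq_at_or_after(uint32_t a, uint32_t b)
{
	return a - b < (1u << 31);
}

int main(void)
{
	assert(seq_at_or_after(6, 5));		/* newer ticket backs off */
	assert(!seq_at_or_after(5, 6));		/* older ticket may wait */
	/* still correct once the counter wraps past zero */
	assert(seq_at_or_after(3, 0xfffffffdu));
	assert(!seq_at_or_after(0xfffffffdu, 3));
	printf("wraparound-safe comparison holds\n");
	return 0;
}
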
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce3..77dbf408c0d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
- int ret;
+ if (likely(man->io_reserve_fastpath))
+ return 0;
+
+ if (interruptible)
+ return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+ mutex_lock(&man->io_reserve_mutex);
+ return 0;
+}
- if (!mem->bus.io_reserved) {
- mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+ struct ttm_buffer_object *bo;
+
+ if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+ return -EAGAIN;
+
+ bo = list_first_entry(&man->io_reserve_lru,
+ struct ttm_buffer_object,
+ io_reserve_lru);
+ list_del_init(&bo->io_reserve_lru);
+ ttm_bo_unmap_virtual_locked(bo);
+
+ return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (!bdev->driver->io_mem_reserve)
+ return 0;
+ if (likely(man->io_reserve_fastpath))
+ return bdev->driver->io_mem_reserve(bdev, mem);
+
+ if (bdev->driver->io_mem_reserve &&
+ mem->bus.io_reserved_count++ == 0) {
+retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (ret == -EAGAIN) {
+ ret = ttm_mem_io_evict(man);
+ if (ret == 0)
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ if (bdev->driver->io_mem_reserve &&
+ --mem->bus.io_reserved_count == 0 &&
+ bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ int ret;
+
+ if (!mem->bus.io_reserved_vm) {
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[mem->mem_type];
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
+ mem->bus.io_reserved_vm = true;
+ if (man->use_io_reserve_lru)
+ list_add_tail(&bo->io_reserve_lru,
+ &man->io_reserve_lru);
}
return 0;
}
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
- if (bdev->driver->io_mem_reserve) {
- if (mem->bus.io_reserved) {
- mem->bus.io_reserved = false;
- bdev->driver->io_mem_free(bdev, mem);
- }
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (mem->bus.io_reserved_vm) {
+ mem->bus.io_reserved_vm = false;
+ list_del_init(&bo->io_reserve_lru);
+ ttm_mem_io_free(bo->bdev, mem);
}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
+ ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy = *old_mem;
+ struct ttm_mem_reg old_copy;
void *old_iomap;
void *new_iomap;
int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
mb();
out2:
- ttm_bo_free_old_node(bo);
-
+ old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
@@ -292,9 +381,10 @@ out2:
}
out1:
- ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- spin_lock_init(&fbo->lock);
init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
+ INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
+ struct ttm_buffer_object *bo = map->bo;
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
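
ttm_mem_io_reserve()/ttm_mem_io_free() become a refcounted pair: only the 0 -> 1 transition calls the driver's io_mem_reserve hook, an -EAGAIN answer makes ttm_mem_io_evict() unmap another buffer's VM mapping and retry, and only the last put calls io_mem_free; the fastpath flag skips the mutex entirely for drivers that never return -EAGAIN. A minimal model of the count-guarded reserve/free cycle (the driver hook and evictable state are stand-ins):

#include <errno.h>
#include <stdio.h>

static int io_reserved_count;
static int evictable = 1;	/* pretend one LRU entry can be evicted */

static int driver_io_mem_reserve(void)
{
	/* Stand-in driver hook: fail once to exercise the retry path. */
	return evictable ? -EAGAIN : 0;
}

static int mem_io_evict(void)
{
	if (!evictable)
		return -EAGAIN;	/* nothing left to evict */
	evictable = 0;
	return 0;
}

static int mem_io_reserve(void)
{
	int ret = 0;

	if (io_reserved_count++ == 0) {	/* only the first user reserves */
retry:
		ret = driver_io_mem_reserve();
		if (ret == -EAGAIN && mem_io_evict() == 0)
			goto retry;
	}
	return ret;
}

static void mem_io_free(void)
{
	if (--io_reserved_count == 0)	/* only the last user releases */
		printf("driver io_mem_free called\n");
}

int main(void)
{
	printf("reserve #1 -> %d\n", mem_io_reserve());
	printf("reserve #2 -> %d\n", mem_io_reserve());
	mem_io_free();
	mem_io_free();
	return 0;
}
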
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f..221b924aceb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int i;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
/*
* Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* move.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
} else
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
-
- ret = ttm_mem_io_reserve(bdev, &bo->mem);
- if (ret) {
- retval = VM_FAULT_SIGBUS;
+ ret = ttm_mem_io_lock(man, true);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_NOPAGE;
goto out_unlock;
}
+ ret = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_io_unlock;
+ }
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
- goto out_unlock;
+ goto out_io_unlock;
} else if (unlikely(!page)) {
break;
}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
else if (unlikely(ret != 0)) {
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
break;
}
-
+out_io_unlock:
+ ttm_mem_io_unlock(man);
out_unlock:
ttm_bo_unreserve(bo);
return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d1..3832fe10b4d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
if (!entry->reserved)
continue;
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+
+ }
entry->reserved = false;
- ttm_bo_unreserve(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
}
}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ spin_lock(&glob->lru_lock);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
{
+ struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
+ uint32_t val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
retry:
+ spin_lock(&glob->lru_lock);
+ val_seq = entry->bo->bdev->val_seq++;
+
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- entry->reserved = false;
- ret = ttm_bo_reserve(bo, true, false, true, val_seq);
- if (ret != 0) {
- ttm_eu_backoff_reservation(list);
- if (ret == -EAGAIN) {
- ret = ttm_bo_wait_unreserved(bo, true);
- if (unlikely(ret != 0))
- return ret;
- goto retry;
- } else
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation(list);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
ret = ttm_bo_wait_cpu(bo, false);
if (ret)
return ret;
goto retry;
}
}
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- struct ttm_bo_driver *driver = bo->bdev->driver;
- void *old_sync_obj;
+ if (list_empty(list))
+ return;
+
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
- spin_lock(&bo->lock);
- old_sync_obj = bo->sync_obj;
+ spin_lock(&bdev->fence_lock);
+ spin_lock(&glob->lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
bo->sync_obj_arg = entry->new_sync_obj_arg;
- spin_unlock(&bo->lock);
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve_locked(bo);
entry->reserved = false;
- if (old_sync_obj)
- driver->sync_obj_unref(&old_sync_obj);
+ }
+ spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
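
ttm_eu_reserve_buffers() now owns the ticket: it draws a fresh value from the new bdev->val_seq under the global lru_lock (which is why vmwgfx can delete its private counter below) and drives a three-way retry per buffer: 0 proceeds, -EBUSY waits for that one buffer and retries it, and -EAGAIN means an older ticket holds it, so every reservation taken so far is backed off and the whole list restarts with a new ticket; LRU kref puts are batched through ttm_eu_list_ref_sub() so they happen outside the spinlock. A skeletal, single-threaded rendering of that control flow (toy ownership model; the contended paths are sketched, not exercised):

#include <errno.h>
#include <stdio.h>

enum { NBO = 3 };
static int owner[NBO] = { -1, -1, -1 };	/* ticket holding each bo; -1 = free */

/* Toy reserve: take a free bo; otherwise report -EAGAIN when an older
 * ticket holds it and -EBUSY when a newer one does, mirroring the
 * outcomes ttm_bo_reserve_locked() can return. */
static int try_reserve(int bo, int ticket)
{
	if (owner[bo] < 0) {
		owner[bo] = ticket;
		return 0;
	}
	return owner[bo] < ticket ? -EAGAIN : -EBUSY;
}

static void backoff_all(int ticket)
{
	for (int i = 0; i < NBO; i++)
		if (owner[i] == ticket)
			owner[i] = -1;
}

static int reserve_all(int *val_seq)
{
	int ticket, bo, ret;

retry:
	ticket = (*val_seq)++;		/* bdev->val_seq analogue */
	for (bo = 0; bo < NBO; bo++) {
		ret = try_reserve(bo, ticket);
		if (ret == -EBUSY)
			return ret;	/* real code waits, then retries this bo */
		if (ret == -EAGAIN) {
			backoff_all(ticket);	/* older ticket wins: restart */
			goto retry;
		}
	}
	return 0;
}

int main(void)
{
	int seq = 0;

	printf("all free: reserve_all -> %d\n", reserve_all(&seq));
	return 0;
}
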
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d05504..10fc01f69c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@ struct vmw_private {
*/
struct vmw_sw_context ctx;
- uint32_t val_seq;
struct mutex cmdbuf_mutex;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3528c..41b95ed6dbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
if (unlikely(ret != 0))
goto out_err;
- ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
- dev_priv->val_seq++);
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index fe096a7cc0d..bfab60c938a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_start = 0;
info->fix.smem_len = fb_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
info->pseudo_palette = par->pseudo_palette;
info->screen_base = par->vmalloc;
info->screen_size = fb_size;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index c8768f38511..e01cacba685 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -33,6 +33,7 @@ struct vga_switcheroo_client {
struct fb_info *fb_info;
int pwr_state;
void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+ void (*reprobe)(struct pci_dev *pdev);
bool (*can_switch)(struct pci_dev *pdev);
int id;
bool active;
@@ -103,6 +104,7 @@ static void vga_switcheroo_enable(void)
int vga_switcheroo_register_client(struct pci_dev *pdev,
void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+ void (*reprobe)(struct pci_dev *pdev),
bool (*can_switch)(struct pci_dev *pdev))
{
int index;
@@ -117,6 +119,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
vgasr_priv.clients[index].pdev = pdev;
vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+ vgasr_priv.clients[index].reprobe = reprobe;
vgasr_priv.clients[index].can_switch = can_switch;
vgasr_priv.clients[index].id = -1;
if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
@@ -174,7 +177,8 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
int i;
mutex_lock(&vgasr_mutex);
for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%c:%s:%s\n", i,
+ seq_printf(m, "%d:%s:%c:%s:%s\n", i,
+ vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
vgasr_priv.clients[i].active ? '+' : ' ',
vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
pci_name(vgasr_priv.clients[i].pdev));
@@ -190,9 +194,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
static int vga_switchon(struct vga_switcheroo_client *client)
{
- int ret;
-
- ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+ if (vgasr_priv.handler->power_state)
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
@@ -203,12 +206,14 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
- vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+ if (vgasr_priv.handler->power_state)
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
-static int vga_switchto(struct vga_switcheroo_client *new_client)
+/* stage one happens before delay */
+static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
int ret;
int i;
@@ -235,10 +240,28 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);
/* swap shadow resource to denote boot VGA device has changed so X starts on new device */
- active->active = false;
-
active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+ return 0;
+}
+
+/* post delay */
+static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+{
+ int ret;
+ int i;
+ struct vga_switcheroo_client *active = NULL;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active == true) {
+ active = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
+ active->active = false;
if (new_client->fb_info) {
struct fb_event event;
@@ -250,6 +273,9 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
+ if (new_client->reprobe)
+ new_client->reprobe(new_client->pdev);
+
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
@@ -265,6 +291,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
const char *pdev_name;
int i, ret;
bool delay = false, can_switch;
+ bool just_mux = false;
int client_id = -1;
struct vga_switcheroo_client *client = NULL;
@@ -319,6 +346,15 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (strncmp(usercmd, "DIS", 3) == 0)
client_id = VGA_SWITCHEROO_DIS;
+ if (strncmp(usercmd, "MIGD", 4) == 0) {
+ just_mux = true;
+ client_id = VGA_SWITCHEROO_IGD;
+ }
+ if (strncmp(usercmd, "MDIS", 4) == 0) {
+ just_mux = true;
+ client_id = VGA_SWITCHEROO_DIS;
+ }
+
if (client_id == -1)
goto out;
@@ -330,6 +366,12 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
}
vgasr_priv.delayed_switch_active = false;
+
+ if (just_mux) {
+ ret = vgasr_priv.handler->switchto(client_id);
+ goto out;
+ }
+
/* okay we want a switch - test if devices are willing to switch */
can_switch = true;
for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
@@ -345,18 +387,22 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (can_switch == true) {
pdev_name = pci_name(client->pdev);
- ret = vga_switchto(client);
+ ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+ printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+
+ ret = vga_switchto_stage2(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+
} else {
printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
vgasr_priv.delayed_switch_active = true;
vgasr_priv.delayed_client_id = client_id;
- /* we should at least power up the card to
- make the switch faster */
- if (client->pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(client);
+ ret = vga_switchto_stage1(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
}
out:
@@ -438,9 +484,9 @@ int vga_switcheroo_process_delayed_switch(void)
goto err;
pdev_name = pci_name(client->pdev);
- ret = vga_switchto(client);
+ ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+ printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
vgasr_priv.delayed_switch_active = false;
err = 0;
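
The monolithic vga_switchto() is split around the delay: stage 1, which can run immediately even for a delayed switch, powers on the incoming GPU, muxes to it, and moves the ROM shadow; stage 2, run once the outgoing driver agrees to switch, flips the active flag, notifies fbcon, calls the new per-client reprobe() hook so the driver can rescan its outputs, and powers the old GPU off. The debugfs side gains mux-only MIGD/MDIS commands and now prints the client type in each row. A condensed model of the two stages (all callbacks here are placeholders):

#include <stdio.h>

struct client {
	const char *name;
	int powered;
	int active;
	void (*reprobe)(struct client *);
};

/* Stage 1 runs before any delay: power up and mux to the new GPU. */
static void switchto_stage1(struct client *new_client)
{
	if (!new_client->powered) {
		new_client->powered = 1;	/* vga_switchon() */
		printf("stage1: %s powered, mux switched\n", new_client->name);
	}
}

/* Stage 2 runs once the old driver can switch: hand over, power off. */
static void switchto_stage2(struct client *new_client, struct client *old)
{
	old->active = 0;
	new_client->active = 1;
	if (new_client->reprobe)
		new_client->reprobe(new_client);	/* rescan outputs */
	old->powered = 0;			/* vga_switchoff() */
	printf("stage2: %s active, %s off\n", new_client->name, old->name);
}

static void reprobe_stub(struct client *c)
{
	printf("reprobe %s outputs\n", c->name);
}

int main(void)
{
	struct client igd = { "IGD", 1, 1, NULL };
	struct client dis = { "DIS", 0, 0, reprobe_stub };

	switchto_stage1(&dis);		/* a delayed switch does this now */
	switchto_stage2(&dis, &igd);	/* ...and this after the handoff */
	return 0;
}
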
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index a39e6cff86e..38319a69bd0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -600,12 +600,14 @@ static const struct i2c_algorithm i2c_bit_algo = {
/*
* registering functions to load algorithms at runtime
*/
-static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
+static int __i2c_bit_add_bus(struct i2c_adapter *adap,
+ int (*add_adapter)(struct i2c_adapter *))
{
struct i2c_algo_bit_data *bit_adap = adap->algo_data;
+ int ret;
if (bit_test) {
- int ret = test_bus(bit_adap, adap->name);
+ ret = test_bus(bit_adap, adap->name);
if (ret < 0)
return -ENODEV;
}
@@ -614,30 +616,27 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
adap->algo = &i2c_bit_algo;
adap->retries = 3;
+ ret = add_adapter(adap);
+ if (ret < 0)
+ return ret;
+
+ /* Complain if SCL can't be read */
+ if (bit_adap->getscl == NULL) {
+ dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
+ dev_warn(&adap->dev, "Bus may be unreliable\n");
+ }
return 0;
}
int i2c_bit_add_bus(struct i2c_adapter *adap)
{
- int err;
-
- err = i2c_bit_prepare_bus(adap);
- if (err)
- return err;
-
- return i2c_add_adapter(adap);
+ return __i2c_bit_add_bus(adap, i2c_add_adapter);
}
EXPORT_SYMBOL(i2c_bit_add_bus);
int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
{
- int err;
-
- err = i2c_bit_prepare_bus(adap);
- if (err)
- return err;
-
- return i2c_add_numbered_adapter(adap);
+ return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter);
}
EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
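
With this refactor, __i2c_bit_add_bus() registers the adapter first and only then warns when SCL cannot be read back. A hedged sketch of a caller; the pin accessors are placeholders for board-specific code, not a real driver:

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
#include <linux/module.h>

static void toy_setsda(void *data, int state) { /* drive SDA */ }
static void toy_setscl(void *data, int state) { /* drive SCL */ }
static int toy_getsda(void *data) { return 1; /* sample SDA */ }
static int toy_getscl(void *data) { return 1; /* sample SCL */ }

static struct i2c_algo_bit_data toy_bit_data = {
	.setsda  = toy_setsda,
	.setscl  = toy_setscl,
	.getsda  = toy_getsda,
	.getscl  = toy_getscl,  /* leaving this NULL now triggers the warning */
	.udelay  = 10,          /* half-period in us, roughly 50 kHz */
	.timeout = 100,         /* in jiffies */
};

static struct i2c_adapter toy_adapter = {
	.owner     = THIS_MODULE,
	.name      = "toy bit-bang adapter",
	.algo_data = &toy_bit_data,
};

static int __init toy_init(void)
{
	/* fills in adap->algo and adap->retries, then calls i2c_add_adapter() */
	return i2c_bit_add_bus(&toy_adapter);
}

static void __exit toy_exit(void)
{
	i2c_del_adapter(&toy_adapter);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");
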
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 02835ce7ff4..7979aef7ee7 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/dmi.h>
+#include <linux/slab.h>
/* I801 SMBus address offsets */
#define SMBHSTSTS(p) (0 + (p)->smba)
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index a605a5029cf..ff1e127dfea 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -432,7 +432,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
static void __devexit nforce2_remove(struct pci_dev *dev)
{
- struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev);
+ struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
nforce2_set_reference(NULL);
if (smbuses[0].base) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b4cc567645..c7db6980e3a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1362,7 +1362,7 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client, const char *buf, int count)
+int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
{
int ret;
struct i2c_adapter *adap = client->adapter;
@@ -1389,7 +1389,7 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf, int count)
+int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
{
struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
@@ -1679,7 +1679,7 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
* This executes the SMBus "receive byte" protocol, returning negative errno
* else the byte received from the device.
*/
-s32 i2c_smbus_read_byte(struct i2c_client *client)
+s32 i2c_smbus_read_byte(const struct i2c_client *client)
{
union i2c_smbus_data data;
int status;
@@ -1699,7 +1699,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
* This executes the SMBus "send byte" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value)
{
return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
@@ -1714,7 +1714,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte);
* This executes the SMBus "read byte" protocol, returning negative errno
* else a data byte received from the device.
*/
-s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command)
{
union i2c_smbus_data data;
int status;
@@ -1735,7 +1735,8 @@ EXPORT_SYMBOL(i2c_smbus_read_byte_data);
* This executes the SMBus "write byte" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command,
+ u8 value)
{
union i2c_smbus_data data;
data.byte = value;
@@ -1753,7 +1754,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte_data);
* This executes the SMBus "read word" protocol, returning negative errno
* else a 16-bit unsigned "word" received from the device.
*/
-s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command)
{
union i2c_smbus_data data;
int status;
@@ -1774,7 +1775,8 @@ EXPORT_SYMBOL(i2c_smbus_read_word_data);
* This executes the SMBus "write word" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
+ u16 value)
{
union i2c_smbus_data data;
data.word = value;
@@ -1793,7 +1795,8 @@ EXPORT_SYMBOL(i2c_smbus_write_word_data);
* This executes the SMBus "process call" protocol, returning negative errno
* else a 16-bit unsigned "word" received from the device.
*/
-s32 i2c_smbus_process_call(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
+ u16 value)
{
union i2c_smbus_data data;
int status;
@@ -1821,7 +1824,7 @@ EXPORT_SYMBOL(i2c_smbus_process_call);
* support this; its emulation through I2C messaging relies on a specific
* mechanism (I2C_M_RECV_LEN) which may not be implemented.
*/
-s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command,
u8 *values)
{
union i2c_smbus_data data;
@@ -1848,7 +1851,7 @@ EXPORT_SYMBOL(i2c_smbus_read_block_data);
* This executes the SMBus "block write" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
union i2c_smbus_data data;
@@ -1864,7 +1867,7 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
EXPORT_SYMBOL(i2c_smbus_write_block_data);
/* Returns the number of read bytes */
-s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command,
u8 length, u8 *values)
{
union i2c_smbus_data data;
@@ -1884,7 +1887,7 @@ s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
}
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
-s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
union i2c_smbus_data data;
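
The const-qualification above lets client drivers keep their i2c_client pointers const all the way down. A small illustrative caller; the register offsets are hypothetical, not from any real chip:

#include <linux/i2c.h>

/* 0x00 and 0x01 are made-up register offsets for illustration only */
static int example_check_chip(const struct i2c_client *client)
{
	s32 id = i2c_smbus_read_byte_data(client, 0x00);

	if (id < 0)
		return id;	/* the helpers return negative errno on failure */
	return i2c_smbus_write_byte_data(client, 0x01, (u8)(id | 0x80));
}
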
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4d91d80bfd2..90b7a016389 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
menu "Multiplexer I2C Chip support"
depends on I2C_MUX
+config I2C_MUX_GPIO
+ tristate "GPIO-based I2C multiplexer"
+ depends on GENERIC_GPIO
+ help
+ If you say yes to this option, support will be included for a
+ GPIO-based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ via GPIO pins.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-i2cmux.
+
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
depends on EXPERIMENTAL
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index d743806d9b4..4640436ea61 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for multiplexer I2C chip drivers.
+obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
new file mode 100644
index 00000000000..7b6ce624cd6
--- /dev/null
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -0,0 +1,184 @@
+/*
+ * I2C multiplexer using GPIO API
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+struct gpiomux {
+ struct i2c_adapter *parent;
+ struct i2c_adapter **adap; /* child busses */
+ struct gpio_i2cmux_platform_data data;
+};
+
+static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+{
+ int i;
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpio_set_value(mux->data.gpios[i], val & (1 << i));
+}
+
+static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct gpiomux *mux = data;
+
+ gpiomux_set(mux, mux->data.values[chan]);
+
+ return 0;
+}
+
+static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct gpiomux *mux = data;
+
+ gpiomux_set(mux, mux->data.idle);
+
+ return 0;
+}
+
+static int __devinit gpiomux_probe(struct platform_device *pdev)
+{
+ struct gpiomux *mux;
+ struct gpio_i2cmux_platform_data *pdata;
+ struct i2c_adapter *parent;
+ int (*deselect) (struct i2c_adapter *, void *, u32);
+ unsigned initial_state;
+ int i, ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ return -ENODEV;
+ }
+
+ parent = i2c_get_adapter(pdata->parent);
+ if (!parent) {
+ dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+ pdata->parent);
+ return -ENODEV;
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ mux->parent = parent;
+ mux->data = *pdata;
+ mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
+ GFP_KERNEL);
+ if (!mux->adap) {
+ ret = -ENOMEM;
+ goto alloc_failed2;
+ }
+
+ if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+ initial_state = pdata->idle;
+ deselect = gpiomux_deselect;
+ } else {
+ initial_state = pdata->values[0];
+ deselect = NULL;
+ }
+
+ for (i = 0; i < pdata->n_gpios; i++) {
+ ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+ if (ret)
+ goto err_request_gpio;
+ gpio_direction_output(pdata->gpios[i],
+ initial_state & (1 << i));
+ }
+
+ for (i = 0; i < pdata->n_values; i++) {
+ u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
+
+ mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
+ gpiomux_select, deselect);
+ if (!mux->adap[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto add_adapter_failed;
+ }
+ }
+
+ dev_info(&pdev->dev, "%d port mux on %s adapter\n",
+ pdata->n_values, parent->name);
+
+ platform_set_drvdata(pdev, mux);
+
+ return 0;
+
+add_adapter_failed:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->adap[i - 1]);
+ i = pdata->n_gpios;
+err_request_gpio:
+ for (; i > 0; i--)
+ gpio_free(pdata->gpios[i - 1]);
+ kfree(mux->adap);
+alloc_failed2:
+ kfree(mux);
+alloc_failed:
+ i2c_put_adapter(parent);
+
+ return ret;
+}
+
+static int __devexit gpiomux_remove(struct platform_device *pdev)
+{
+ struct gpiomux *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->data.n_values; i++)
+ i2c_del_mux_adapter(mux->adap[i]);
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpio_free(mux->data.gpios[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_put_adapter(mux->parent);
+ kfree(mux->adap);
+ kfree(mux);
+
+ return 0;
+}
+
+static struct platform_driver gpiomux_driver = {
+ .probe = gpiomux_probe,
+ .remove = __devexit_p(gpiomux_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gpio-i2cmux",
+ },
+};
+
+static int __init gpiomux_init(void)
+{
+ return platform_driver_register(&gpiomux_driver);
+}
+
+static void __exit gpiomux_exit(void)
+{
+ platform_driver_unregister(&gpiomux_driver);
+}
+
+module_init(gpiomux_init);
+module_exit(gpiomux_exit);
+
+MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
+MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-i2cmux");
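
The driver consumes a struct gpio_i2cmux_platform_data supplied by board code, with the fields visible in the probe routine above. A hedged board-file sketch for a 4-way mux behind bus 0 steered by two GPIOs; the GPIO numbers, bus numbers and device id are illustrative assumptions:

#include <linux/gpio-i2cmux.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static const unsigned myboard_mux_values[] = { 0, 1, 2, 3 };
static const unsigned myboard_mux_gpios[] = { 58, 59 };

static struct gpio_i2cmux_platform_data myboard_mux_data = {
	.parent   = 0,                  /* the mux hangs off i2c-0 */
	.base_nr  = 10,                 /* children become i2c-10..i2c-13 */
	.values   = myboard_mux_values, /* GPIO pattern per channel */
	.n_values = ARRAY_SIZE(myboard_mux_values),
	.gpios    = myboard_mux_gpios,  /* bit 0 -> GPIO 58, bit 1 -> GPIO 59 */
	.n_gpios  = ARRAY_SIZE(myboard_mux_gpios),
	.idle     = GPIO_I2CMUX_NO_IDLE, /* stay on the last selected channel */
};

static struct platform_device myboard_mux_device = {
	.name = "gpio-i2cmux",
	.id   = 0,
	.dev  = {
		.platform_data = &myboard_mux_data,
	},
};

static int __init myboard_add_i2cmux(void)
{
	return platform_device_register(&myboard_mux_device);
}
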
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740..c3f5aca4ef0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
+#ifdef notyet
int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
+#endif
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc..c5406da3f4c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_post_zb_read(struct iwch_qp *qhp);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d..1b4cd09f74d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
- spin_lock_irq(&qhp->lock);
- iwch_quiesce_tid(qhp->ep);
- qhp->flags |= QP_QUIESCED;
- spin_unlock_irq(&qhp->lock);
- return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
- spin_lock_irq(&qhp->lock);
- iwch_resume_tid(qhp->ep);
- qhp->flags &= ~QP_QUIESCED;
- spin_unlock_irq(&qhp->lock);
- return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
- int i;
- struct iwch_qp *qhp;
-
- for (i=0; i < T3_MAX_NUM_QP; i++) {
- qhp = get_qhp(chp->rhp, i);
- if (!qhp)
- continue;
- if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
- quiesce_qp(qhp);
- continue;
- }
- if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
- quiesce_qp(qhp);
- }
- return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
- int i;
- struct iwch_qp *qhp;
-
- for (i=0; i < T3_MAX_NUM_QP; i++) {
- qhp = get_qhp(chp->rhp, i);
- if (!qhp)
- continue;
- if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
- resume_qp(qhp);
- continue;
- }
- if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
- resume_qp(qhp);
- }
- return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb433..cc600c2dd0b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea..20800900ef3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
}
}
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
- union t4_wr *wqe;
- struct sk_buff *skb;
- u8 len16;
-
- PDBG("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
- wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
- memset(wqe, 0, sizeof wqe->read);
- wqe->read.r2 = cpu_to_be64(0);
- wqe->read.stag_sink = cpu_to_be32(1);
- wqe->read.to_sink_hi = cpu_to_be32(0);
- wqe->read.to_sink_lo = cpu_to_be32(1);
- wqe->read.stag_src = cpu_to_be32(1);
- wqe->read.plen = cpu_to_be32(0);
- wqe->read.to_src_hi = cpu_to_be32(0);
- wqe->read.to_src_lo = cpu_to_be32(1);
- len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
- init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
- return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
gfp_t gfp)
{
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- c4iw_init_wr_wait(&ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da7..b33f0457a1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
for (j = 0; j < 6; j++) {
if (!pdev->resource[j].start)
continue;
- ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
- j, (unsigned long long)pdev->resource[j].start,
- (unsigned long long)pdev->resource[j].end,
+ ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+ j, &pdev->resource[j],
(unsigned long long)pci_resource_len(pdev, j));
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf1..e8df155bc3b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cq->resize_buf = NULL;
cq->resize_umem = NULL;
} else {
+ struct mlx4_ib_cq_buf tmp_buf;
+ int tmp_cqe = 0;
+
spin_lock_irq(&cq->lock);
if (cq->resize_buf) {
mlx4_ib_cq_resize_copy_cqes(cq);
- mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+ tmp_buf = cq->buf;
+ tmp_cqe = cq->ibcq.cqe;
cq->buf = cq->resize_buf->buf;
cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cq->resize_buf = NULL;
}
spin_unlock_irq(&cq->lock);
+
+ if (tmp_cqe)
+ mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
}
goto out;
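
The fix above snapshots the old CQ buffer under the spinlock and frees it only after unlocking, presumably because the buffer teardown can sleep and must not run in atomic context. A generic sketch of that pattern with placeholder names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct buf_holder {
	spinlock_t lock;
	void *buf;
};

/* stand-in for mlx4_ib_free_cq_buf(); the real teardown may sleep */
static void teardown_buf(void *buf)
{
	kfree(buf);
}

static void swap_then_free(struct buf_holder *h, void *new_buf)
{
	void *old;

	spin_lock_irq(&h->lock);
	old = h->buf;		/* snapshot what must be torn down */
	h->buf = new_buf;	/* publish the replacement */
	spin_unlock_irq(&h->lock);

	teardown_buf(old);	/* the sleeping work runs outside the lock */
}
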
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e..57ffa50f509 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b..03a59534f59 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
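
Both forward_trap() fixes above guard against ib_create_send_mad() failing, which reports errors ERR_PTR-style rather than by returning NULL. A standalone sketch of that convention with a hypothetical allocator:

#include <linux/err.h>
#include <linux/errno.h>

/* hypothetical allocator using the same convention as ib_create_send_mad() */
static void *toy_create_buf(void)
{
	return ERR_PTR(-ENOMEM);
}

static int toy_use_buf(void)
{
	void *buf = toy_create_buf();

	if (IS_ERR(buf))		/* a plain NULL check would miss this */
		return PTR_ERR(buf);	/* recover the encoded negative errno */
	/* ... post the buffer ... */
	return 0;
}
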
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95..5a4c3648472 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
nes_debug(NES_DBG_NIC_RX,
- "mc_index=%d skipping nic_index=%d,\
- used for=%d \n", mc_index,
+ "mc_index=%d skipping nic_index=%d, "
+ "used for=%d \n", mc_index,
nesvnic->nic_index,
nesadapter->pft_mcast_map[mc_index]);
mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d02d4..73225eee3cc 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
void (*f_sdma_hw_start_up)(struct qib_pportdata *);
void (*f_sdma_init_early)(struct qib_pportdata *);
void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
- void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+ void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
u32 (*f_hdrqempty)(struct qib_ctxtdata *);
u64 (*f_portcntr)(struct qib_pportdata *, u32);
u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f9..5246aa486bb 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ (cq->notify == IB_CQ_SOLICITED &&
+ (solicited || entry->status != IB_WC_SUCCESS))) {
cq->notify = IB_CQ_NONE;
cq->triggered++;
/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb..23e584f4c36 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
*/
#define QIB_PIO_MAXIBHDR 128
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
struct qlogic_ib_stats qib_stats;
const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
* Returns 1 if error was a CRC, else 0.
* Needed for some chip's synthesized error counters.
*/
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
- u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
- struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+ u32 ctxt, u32 eflags, u32 l, u32 etail,
+ __le32 *rhf_addr, struct qib_message_header *rhdr)
{
u32 ret = 0;
if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
ret = 1;
+ else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+ /* For TIDERR and RC QPs preemptively schedule a NAK */
+ struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+ struct qib_other_headers *ohdr = NULL;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct qib_qp *qp = NULL;
+ u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ u16 lid = be16_to_cpu(hdr->lrh[1]);
+ int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ u32 qp_num;
+ u32 opcode;
+ u32 psn;
+ int diff;
+ unsigned long flags;
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto drop;
+
+ if (lid < QIB_MULTICAST_LID_BASE) {
+ lid &= ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid != ppd->lid))
+ goto drop;
+ }
+
+ /* Check for GRH */
+ if (lnh == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == QIB_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else
+ goto drop;
+
+ /* Get opcode and PSN from packet */
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ opcode >>= 24;
+ psn = be32_to_cpu(ohdr->bth[2]);
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ if (qp_num != QIB_MULTICAST_QPN) {
+ int ruc_res;
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+
+ /*
+ * Handle only RC QPs - for other QP types drop error
+ * packet.
+ */
+ spin_lock(&qp->r_lock);
+
+ /* Check for valid receive state. */
+ if (!(ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto unlock;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ spin_lock_irqsave(&qp->s_lock, flags);
+ ruc_res =
+ qib_ruc_check_hdr(
+ ibp, hdr,
+ lnh == QIB_LRH_GRH,
+ qp,
+ be32_to_cpu(ohdr->bth[0]));
+ if (ruc_res) {
+ spin_unlock_irqrestore(&qp->s_lock,
+ flags);
+ goto unlock;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Only deal with RDMA Writes for now */
+ if (opcode <
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+ diff = qib_cmp24(psn, qp->r_psn);
+ if (!qp->r_nak_state && diff >= 0) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state =
+ IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence
+ * NAK until all packets
+ * in the receive queue have
+ * been processed.
+ * Otherwise, we end up
+ * propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |=
+ QIB_R_RSP_NAK;
+ atomic_inc(
+ &qp->refcount);
+ list_add_tail(
+ &qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ } /* Out of sequence NAK */
+ } /* QP Request NAKs */
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ case IB_QPT_UC:
+ default:
+ /* For now don't handle any other QP types */
+ break;
+ }
+
+unlock:
+ spin_unlock(&qp->r_lock);
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } /* Unicast QP */
+ } /* Valid packet with TIDErr */
+
+drop:
return ret;
}
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}
- for (last = 0, i = 1; !last && i <= 64; i += !last) {
+ for (last = 0, i = 1; !last; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
* packets; only qibhdrerr should be set.
*/
if (unlikely(eflags))
- crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
etail, rhf_addr, hdr);
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
l += rsize;
if (l >= maxcnt)
l = 0;
+ if (i == QIB_MAX_PKT_RECV)
+ last = 1;
+
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
if (dd->flags & QIB_NODMA_RTAIL) {
u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
*/
lval = l;
if (!last && !(i & 0xf)) {
- dd->f_update_usrhead(rcd, lval, updegr, etail);
+ dd->f_update_usrhead(rcd, lval, updegr, etail, i);
updegr = 0;
}
}
@@ -444,7 +585,7 @@ bail:
* if no packets were processed.
*/
lval = (u64)rcd->head | dd->rhdrhead_intr_off;
- dd->f_update_usrhead(rcd, lval, updegr, etail);
+ dd->f_update_usrhead(rcd, lval, updegr, etail, i);
return crcs;
}
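
qib_kreceive() now stops after QIB_MAX_PKT_RECV packets instead of hard-coding 64 in the loop condition, and reports the count so the usrhead update can tune the interrupt timeout. A contrived, self-contained sketch of the budget idea; every helper here is a stand-in, only the cap mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

#define QIB_MAX_PKT_RECV 64	/* max packets handled per receive interrupt */

/* trivial stand-ins for the receive-ring plumbing */
static bool more_packets(void) { return false; }
static void handle_packet(void) { }

static unsigned int kreceive_budgeted(void)
{
	unsigned int i = 0;
	bool last = false;

	while (!last) {
		handle_packet();
		i++;
		/* stop when the queue drains or the budget is spent, so one
		 * busy context cannot monopolize the interrupt handler */
		if (!more_packets() || i == QIB_MAX_PKT_RECV)
			last = true;
	}
	return i;	/* npkts, which now also feeds f_update_usrhead() */
}

int main(void)
{
	printf("handled %u packet(s)\n", kreceive_budgeted());
	return 0;
}
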
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971aff1..75bfad16c11 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
- unsigned cused = 0, cfree = 0;
+ unsigned cused = 0, cfree = 0, pusable = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
usable(dd->pport + port - 1))
- dusable = 1;
+ pusable = 1;
else
for (i = 0; i < dd->num_pports; i++)
if (usable(dd->pport + i))
- dusable++;
- if (!dusable)
+ pusable++;
+ if (!pusable)
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
cused++;
else
cfree++;
- if (cfree && cused < inuse) {
+ if (pusable && cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb953..774dea897e9 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
}
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e739..127a0d5069f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
nchipctxts = qib_read_kreg32(dd, kr_portcnt);
dd->cspec->numctxts = nchipctxts;
if (qib_n_krcv_queues > 1) {
- dd->qpn_mask = 0x3f;
+ dd->qpn_mask = 0x3e;
dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
if (dd->first_user_ctxt > nchipctxts)
dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
}
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b533..dbbb0e85afe 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
@@ -111,6 +114,21 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
#define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@ struct qib_chippport_specific {
u8 ibmalfusesnap;
struct qib_qsfp_data qsfp_data;
char epmsgbuf[192]; /* for port error interrupt msg buffer */
+ u8 bounced;
};
static struct {
@@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
+ if (!ppd->dd->cspec->r1)
+ serdes_7322_los_enable(ppd, 0);
} else if (ppd->cpspec->qdr_reforce &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
(ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
adj_tx_serdes(ppd);
- if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
- ppd->cpspec->qdr_dfe_on = 1;
- ppd->cpspec->qdr_dfe_time = 0;
- /* On link down, reenable QDR adaptation */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 :
- QDR_STATIC_ADAPT_DOWN);
+ if (ibclt != IB_7322_LT_STATE_LINKUP) {
+ u8 ltstate = qib_7322_phys_portstate(ibcst);
+ u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+ LinkTrainingState);
+ if (!ppd->dd->cspec->r1 &&
+ pibclt == IB_7322_LT_STATE_LINKUP &&
+ ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+ /* If the link went down (but not into recovery),
+ * turn LOS back on */
+ serdes_7322_los_enable(ppd, 1);
+ if (!ppd->cpspec->qdr_dfe_on &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+ ppd->cpspec->qdr_dfe_on = 1;
+ ppd->cpspec->qdr_dfe_time = 0;
+ /* On link down, reenable QDR adaptation */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 :
+ QDR_STATIC_ADAPT_DOWN);
+ printk(KERN_INFO QIB_DRV_NAME
+ " IB%u:%u re-enabled QDR adaptation "
+ "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+ }
}
}
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
/*
* This is per-pport error handling.
 * will likely get its own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
IB_PHYSPORTSTATE_DISABLED)
qib_set_ib_7322_lstate(ppd, 0,
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- else
+ else {
+ u32 lstate;
+ /*
+ * We need the current logical link state before
+ * lflags are set in handle_e_ibstatuschanged.
+ */
+ lstate = qib_7322_iblink_state(ibcs);
+
+ if (IS_QMH(dd) && !ppd->cpspec->bounced &&
+ ltstate == IB_PHYSPORTSTATE_LINKUP &&
+ (lstate >= IB_PORT_INIT &&
+ lstate <= IB_PORT_ACTIVE)) {
+ ppd->cpspec->bounced = 1;
+ qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+ }
+
/*
* Since going into a recovery state causes the link
* state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
}
if (*msg && iserr)
qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
ctxtrbits &= ~rmask;
if (dd->rcd[i]) {
qib_kreceive(dd->rcd[i], NULL, &npkts);
- adjust_rcv_timeout(dd->rcd[i], npkts);
}
}
rmask <<= 1;
@@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
(1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
qib_kreceive(rcd, NULL, &npkts);
- adjust_rcv_timeout(rcd, npkts);
return IRQ_HANDLED;
}
@@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
case BOARD_QME7342:
n = "InfiniPath_QME7342";
break;
+ case 8:
+ n = "InfiniPath_QME7362";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
case 15:
n = "InfiniPath_QLE7342_TEST";
dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
dd->cspec->numctxts = nchipctxts;
if (qib_n_krcv_queues > 1 && dd->num_pports) {
- /*
- * Set the mask for which bits from the QPN are used
- * to select a context number.
- */
- dd->qpn_mask = 0x3f;
dd->first_user_ctxt = NUM_IB_PORTS +
(qib_n_krcv_queues - 1) * dd->num_pports;
if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
/* kr_rcvegrcnt changes based on the number of contexts enabled */
dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
- dd->num_pports > 1 ? 1024U : 2048U);
+ if (qib_rcvhdrcnt)
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+ else
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->num_pports > 1 ? 1024U : 2048U);
}
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
}
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
+ /*
+ * Need to write timeout register before updating rcvhdrhead to ensure
+ * that the timer is enabled on reception of a packet.
+ */
+ if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+ adjust_rcv_timeout(rcd, npkts);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
@@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work)
u64 now = get_jiffies_64();
if (time_after64(now, pwrup))
break;
- msleep(1);
+ msleep(20);
}
ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
/*
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
u32 pidx, unit, port, deflt, h1;
unsigned long val;
int any = 0, seth1;
+ int txdds_size;
str = txselect_list;
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd->pport[pidx].cpspec->no_eep = deflt;
+ txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+ if (IS_QME(dd) || IS_QMH(dd))
+ txdds_size += TXDDS_MFG_SZ;
+
while (*nxt && nxt[1]) {
str = ++nxt;
unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
;
continue;
}
- if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+ if (val >= txdds_size)
continue;
seth1 = 0;
h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
return -ENOSPC;
}
val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ TXDDS_MFG_SZ)) {
printk(KERN_INFO QIB_DRV_NAME
"txselect_values must start with a number < %d\n",
- TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
return -EINVAL;
}
strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
unsigned n, regno;
unsigned long flags;
- if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+ if (dd->n_krcv_queues < 2 ||
+ !dd->pport[pidx].link_speed_supported)
continue;
ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
ppd++;
}
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rcvhdrentsize = qib_rcvhdrentsize ?
+ qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = qib_rcvhdrsize ?
+ qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
/* make sure we see an updated copy next time around */
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
sleeps++;
- msleep(1);
+ msleep(20);
}
switch (which) {
@@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
{ 0, 1, 0, 12 }, /* QMH7342 backplane settings */
};
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 0 }, /* QME7342 mfg settings */
+ { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
+};
+
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
unsigned atten)
{
@@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
*sdr_dds = &txdds_extra_sdr[idx];
*ddr_dds = &txdds_extra_ddr[idx];
*qdr_dds = &txdds_extra_qdr[idx];
+ } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+ ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ TXDDS_MFG_SZ)) {
+ idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ printk(KERN_INFO QIB_DRV_NAME
+ " IB%u:%u use idx %u into txdds_mfg\n",
+ ppd->dd->unit, ppd->port, idx);
+ *sdr_dds = &txdds_extra_mfg[idx];
+ *ddr_dds = &txdds_extra_mfg[idx];
+ *qdr_dds = &txdds_extra_mfg[idx];
} else {
/* this shouldn't happen, it's range checked */
*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
}
}
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+ u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+ ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+ if (enable)
+ data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ else
+ data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
static int serdes_7322_init(struct qib_pportdata *ppd)
{
- u64 data;
+ int ret = 0;
+ if (ppd->dd->cspec->r1)
+ ret = serdes_7322_init_old(ppd);
+ else
+ ret = serdes_7322_init_new(ppd);
+ return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
u32 le_val;
/*
@@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
- data = qib_read_kreg_port(ppd, krp_serdesctrl);
- /* Turn off IB latency mode */
- data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
- qib_write_kreg_port(ppd, krp_serdesctrl, data |
- SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+ serdes_7322_los_enable(ppd, 1);
/* rxbistena; set 0 to avoid effects of it switch later */
ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
return 0;
}
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+ u64 tstart;
+ u32 le_val, rxcaldone;
+ int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
+ /* Clear cmode-override, may be set from older driver */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+ /* ensure no tx overrides from earlier driver loads */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
+ /* START OF LSI SUGGESTED SERDES BRINGUP */
+ /* Reset - Calibration Setup */
+ /* Stop DFE adaptation */
+ ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+ /* Disable LE1 */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+ /* Disable autoadapt for LE1 */
+ ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+ /* Disable LE2 */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+ /* Disable VGA */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+ /* Disable AFE Offset Cancel */
+ ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+ /* Disable Timing Loop */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+ /* Disable Frequency Loop */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+ /* Disable Baseline Wander Correction */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+ /* Disable RX Calibration */
+ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+ /* Disable RX Offset Calibration */
+ ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+ /* Select BB CDR */
+ ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+ /* CDR Step Size */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+ /* Enable phase Calibration */
+ ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+ /* DFE Bandwidth [2:14-12] */
+ ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+ /* DFE Config (4 taps only) */
+ ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+ /* Gain Loop Bandwidth */
+ if (!ppd->dd->cspec->r1) {
+ ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+ ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+ } else {
+ ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+ }
+ /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
+ /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
+ /* Data Rate Select [5:7-6] (leave as default) */
+ /* RX Parallel Word Width [3:10-8] (leave as default) */
+
+ /* RX RESET */
+ /* Single- or Multi-channel reset */
+ /* RX Analog reset */
+ /* RX Digital reset */
+ ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+ msleep(20);
+ /* RX Analog reset */
+ ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+ msleep(20);
+ /* RX Digital reset */
+ ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+ msleep(20);
+
+ /* setup LoS params; these are subsystem, so chan == 5 */
+ /* LoS filter threshold_count on, ch 0-3, set to 8 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+ /* LoS filter threshold_count off, ch 0-3, set to 4 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+ /* LoS filter select enabled */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+ /* LoS target data: SDR=4, DDR=2, QDR=1 */
+ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+ /* Turn on LOS on initial SERDES init */
+ serdes_7322_los_enable(ppd, 1);
+ /* FLoop LOS gate: PPM filter enabled */
+ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+ /* RX LATCH CALIBRATION */
+ /* Enable Eyefinder Phase Calibration latch */
+ ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+ /* Enable RX Offset Calibration latch */
+ ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+ msleep(20);
+ /* Start Calibration */
+ ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+ tstart = get_jiffies_64();
+ while (chan_done &&
+ !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+ msleep(20);
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+ (chan + (chan >> 1)),
+ 25, 0, 0);
+ if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+ (~chan_done & (1 << chan)) == 0)
+ chan_done &= ~(1 << chan);
+ }
+ }
+ if (chan_done) {
+ printk(KERN_INFO QIB_DRV_NAME
+ " Serdes %d calibration not done after .5 sec: 0x%x\n",
+ IBSD(ppd->hw_pidx), chan_done);
+ } else {
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+ (chan + (chan >> 1)),
+ 25, 0, 0);
+ if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+ printk(KERN_INFO QIB_DRV_NAME
+ " Serdes %d chan %d calibration "
+ "failed\n", IBSD(ppd->hw_pidx), chan);
+ }
+ }
+
+ /* Turn off Calibration */
+ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+ msleep(20);
+
+ /* BRING RX UP */
+ /* Set LE2 value (May be overridden in qsfp_7322_event) */
+ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+ /* Set LE2 Loop bandwidth */
+ ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+ /* Enable LE2 */
+ ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+ msleep(20);
+ /* Enable H0 only */
+ ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+ /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+ ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+ /* Enable VGA */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+ msleep(20);
+ /* Set Frequency Loop Bandwidth */
+ ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+ /* Enable Frequency Loop */
+ ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+ /* Set Timing Loop Bandwidth */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+ /* Enable Timing Loop */
+ ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+ msleep(50);
+ /* Enable DFE
+ * Set receive adaptation mode. SDR and DDR adaptation are
+ * always on, and QDR is initially enabled; later disabled.
+ */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+ ppd->cpspec->qdr_dfe_on = 1;
+ /* Disable LE1 */
+ ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+ /* Disable auto adapt for LE1 */
+ ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+ msleep(20);
+ /* Enable AFE Offset Cancel */
+ ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+ /* Enable Baseline Wander Correction */
+ ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+ /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+ /* VGA output common mode */
+ ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+ return 0;
+}
+
/* start adjust QMH serdes parameters */
static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b50393604..7896afbb9ce 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@ unsigned long *qib_cpulist;
/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
- if (!qib_cfgctxts)
+ if (!qib_cfgctxts) {
dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
- else if (qib_cfgctxts < dd->num_pports)
+ if (dd->cfgctxts > dd->ctxtcnt)
+ dd->cfgctxts = dd->ctxtcnt;
+ } else if (qib_cfgctxts < dd->num_pports)
dd->cfgctxts = dd->ctxtcnt;
else if (qib_cfgctxts <= dd->ctxtcnt)
dd->cfgctxts = qib_cfgctxts;
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828a10..a693c56ec8a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
/* start a 75msec timer to clear symbol errors */
mod_timer(&ppd->symerr_clear_timer,
msecs_to_jiffies(75));
- } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
/* active, but not active defered */
qib_hol_up(ppd); /* useful only for 6120 now */
*ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d5..8fd19a47df0 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- int ret = 0;
unsigned long flags;
/*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
if (!dev->dma_mr)
goto bail;
atomic_inc(&dev->dma_mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
isge->mr = dev->dma_mr;
isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc))
goto bail;
+ atomic_inc(&mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
+ if (mr->page_shift) {
+ /*
+ page sizes are a uniform power of 2, so no loop is necessary;
+ entries_spanned_by_off is the number of times the loop below
+ would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off/QIB_SEGSZ;
+ n = entries_spanned_by_off%QIB_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
}
}
- atomic_inc(&mr->refcount);
isge->mr = mr;
isge->vaddr = mr->map[m]->segs[n].vaddr + off;
isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
isge->m = m;
isge->n = n;
ok:
- ret = 1;
+ return 1;
bail:
spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
+ return 0;
}
/**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- int ret = 0;
unsigned long flags;
/*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
if (!dev->dma_mr)
goto bail;
atomic_inc(&dev->dma_mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
sge->mr = dev->dma_mr;
sge->vaddr = (void *) vaddr;
sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
+ atomic_inc(&mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
+ if (mr->page_shift) {
+ /*
+ page sizes are a uniform power of 2, so no loop is necessary;
+ entries_spanned_by_off is the number of times the loop below
+ would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off/QIB_SEGSZ;
+ n = entries_spanned_by_off%QIB_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
}
}
- atomic_inc(&mr->refcount);
sge->mr = mr;
sge->vaddr = mr->map[m]->segs[n].vaddr + off;
sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
sge->m = m;
sge->n = n;
ok:
- ret = 1;
+ return 1;
bail:
spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
+ return 0;
}
/*
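
When the region was built from uniform power-of-two pages, the new fast path converts the byte offset straight into map coordinates instead of walking segments. A standalone check of the arithmetic; the QIB_SEGSZ value is illustrative, not the driver's real one:

#include <stdio.h>

#define QIB_SEGSZ 64	/* illustrative segments per map entry */

int main(void)
{
	unsigned page_shift = 12;		/* uniform 4 KiB pages */
	unsigned long off = 5 * 4096 + 100;	/* byte offset into the region */
	unsigned long entries = off >> page_shift; /* whole pages spanned */

	off -= entries << page_shift;		/* what is left inside the page */
	printf("m=%lu n=%lu off=%lu\n",
	       entries / QIB_SEGSZ, entries % QIB_SEGSZ, off);
	/* prints m=0 n=5 off=100 -- the same answer the per-segment
	 * walk would have produced, in O(1) */
	return 0;
}
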
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f..5ad224e4a38 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lid = be16_to_cpu(pip->lid);
/* Must be a valid unicast LID address. */
if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
- goto err;
- if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
if (ppd->lid != lid)
qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
msl = pip->neighbormtu_mastersmsl & 0xF;
/* Must be a valid unicast LID address. */
if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
- goto err;
- if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
spin_lock_irqsave(&ibp->lock, flags);
if (ibp->sm_ah) {
if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (lwe == 0xFF)
lwe = ppd->link_width_supported;
else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
- goto err;
- set_link_width_enabled(ppd, lwe);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (lwe != ppd->link_width_enabled)
+ set_link_width_enabled(ppd, lwe);
}
lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (lse == 15)
lse = ppd->link_speed_supported;
else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
- goto err;
- set_link_speed_enabled(ppd, lse);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (lse != ppd->link_speed_enabled)
+ set_link_speed_enabled(ppd, lse);
}
/* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
IB_LINKINITCMD_POLL);
break;
default:
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
}
ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
if (mtu == -1)
- goto err;
- qib_set_mtu(ppd, mtu);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ qib_set_mtu(ppd, mtu);
/* Set operational VLs */
vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
if (vls) {
if (vls > ppd->vls_supported)
- goto err;
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
}
if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
ore = pip->localphyerrors_overrunerrors;
if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
if (set_overrunthreshold(ppd, (ore & 0xF)))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
state = pip->linkspeed_portstate & 0xF;
lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
/*
* Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lstate = QIB_IB_LINKDOWN;
else if (lstate == 3)
lstate = QIB_IB_LINKDOWN_DISABLE;
- else
- goto err;
+ else {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags &= ~QIBL_LINKV;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
break;
default:
- /* XXX We have already partially updated our state! */
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
}
ret = subn_get_portinfo(smp, ibdev, port);
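
subn_set_portinfo() now records each bad field in smp->status with IB_SMP_INVALID_FIELD and keeps going, instead of bailing out with a half-applied update. A hedged sketch of that validate-and-continue shape (field names and limits are illustrative, not the MAD layout):

#define INVALID_FIELD 0x04 /* stand-in for IB_SMP_INVALID_FIELD */

struct port_cfg { unsigned int mtu, vls; };

static unsigned int set_port_cfg(struct port_cfg *p, unsigned int mtu,
                                 unsigned int vls, unsigned int vls_sup)
{
        unsigned int status = 0;

        if (mtu < 256 || mtu > 4096)
                status |= INVALID_FIELD; /* note the error ... */
        else
                p->mtu = mtu;            /* ... but apply what is valid */

        if (vls > vls_sup)
                status |= INVALID_FIELD;
        else
                p->vls = vls;

        return status; /* caller still returns the full port info */
}
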
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f6385..08944e2ee33 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
/* Fast memory region */
struct qib_fmr {
struct ib_fmr ibfmr;
- u8 page_shift;
struct qib_mregion mr; /* must be last */
};
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
goto bail;
}
mr->mr.mapsz = m;
+ mr->mr.page_shift = 0;
mr->mr.max_segs = count;
/*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
+ if (is_power_of_2(umem->page_size))
+ mr->mr.page_shift = ilog2(umem->page_size);
m = 0;
n = 0;
list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
fmr->mr.offset = 0;
fmr->mr.access_flags = mr_access_flags;
fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->page_shift = fmr_attr->page_shift;
+ fmr->mr.page_shift = fmr_attr->page_shift;
atomic_set(&fmr->mr.refcount, 0);
ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
spin_lock_irqsave(&rkt->lock, flags);
fmr->mr.user_base = iova;
fmr->mr.iova = iova;
- ps = 1 << fmr->page_shift;
+ ps = 1 << fmr->mr.page_shift;
fmr->mr.length = list_len * ps;
m = 0;
n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851d2de..e16751f8639 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
struct qpn_map *map, unsigned off,
- unsigned r)
+ unsigned n)
{
if (qpt->mask) {
off++;
- if ((off & qpt->mask) >> 1 != r)
- off = ((off & qpt->mask) ?
- (off | qpt->mask) + 1 : off) | (r << 1);
+ if (((off & qpt->mask) >> 1) >= n)
+ off = (off | qpt->mask) + 2;
} else
off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
u32 i, offset, max_scan, qpn;
struct qpn_map *map;
u32 ret;
- int r;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
goto bail;
}
- r = smp_processor_id();
- if (r >= dd->n_krcv_queues)
- r %= dd->n_krcv_queues;
- qpn = qpt->last + 1;
+ qpn = qpt->last + 2;
if (qpn >= QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
- qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
- (r << 1);
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+ qpn = (qpn | qpt->mask) + 2;
offset = qpn & BITS_PER_PAGE_MASK;
map = &qpt->map[qpn / BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
ret = qpn;
goto bail;
}
- offset = find_next_offset(qpt, map, offset, r);
+ offset = find_next_offset(qpt, map, offset,
+ dd->n_krcv_queues);
qpn = mk_qpn(qpt, map, offset);
/*
* This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
if (qpt->nmaps == QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
- offset = qpt->mask ? (r << 1) : 0;
+ offset = 0;
} else if (map < &qpt->map[qpt->nmaps]) {
++map;
- offset = qpt->mask ? (r << 1) : 0;
+ offset = 0;
} else {
map = &qpt->map[0];
- offset = qpt->mask ? (r << 1) : 2;
+ offset = 2;
}
qpn = mk_qpn(qpt, map, offset);
}
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
+
+ if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
}
qp->ibqp.qp_num = err;
qp->port_num = init_attr->port_num;
- qp->processor_id = smp_processor_id();
qib_reset_qp(qp, init_attr->qp_type);
break;
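
The QPN allocator above no longer binds a QP to the allocating CPU; with a context mask in play it simply skips any QPN whose encoded context is out of range by rounding up past the block. A sketch of that stepping (the exact meaning of the mask's low bit follows the driver's QPN encoding and is assumed here):

static unsigned int next_qpn(unsigned int qpn, unsigned int mask,
                             unsigned int nctxts)
{
        qpn++;
        /*
         * (qpn | mask) + 2 carries out of the masked context bits,
         * so the context field wraps to zero at the start of the
         * next block of QPNs.
         */
        if (mask && ((qpn & mask) >> 1) >= nctxts)
                qpn = (qpn | mask) + 2;
        return qpn;
}
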
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb715779..8245237b67c 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
struct qib_ctxtdata *rcd)
{
struct qib_swqe *wqe;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
enum ib_wc_status status;
unsigned long flags;
int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
u32 aeth;
u64 val;
+ if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+ /*
+ * If ACK'd PSN on SDMA busy list try to make progress to
+ * reclaim SDMA credits.
+ */
+ if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+ /*
+ * If send tasklet not running attempt to progress
+ * SDMA queue.
+ */
+ if (!(qp->s_flags & QIB_S_BUSY)) {
+ /* Acquire SDMA Lock */
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /* Invoke sdma make progress */
+ qib_sdma_make_progress(ppd);
+ /* Release SDMA Lock */
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+ }
+
spin_lock_irqsave(&qp->s_lock, flags);
/* Ignore invalid responses. */
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2a1f8..4a51fd1e9cb 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
- /* Get the number of bytes the message was padded by. */
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
+ */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- /* Drop incomplete packets. */
- ibp->n_pkt_drops++;
- goto bail;
- }
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+
tlen -= hdrsize + pad + 4;
/*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
*/
if (qp->ibqp.qp_num) {
if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE)) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ hdr->lrh[3] == IB_LID_PERMISSIVE))
+ goto drop;
if (qp->ibqp.qp_num > 1) {
u16 pkey1, pkey2;
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
- goto bail;
+ return;
}
}
if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
- goto bail;
+ return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+ goto drop;
} else {
struct ib_smp *smp;
/* Drop invalid MAD packets (see 13.5.3.1). */
- if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+ goto drop;
smp = (struct ib_smp *) data;
if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ goto drop;
}
/*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- hdrsize += sizeof(u32);
+ tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
- } else {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ } else
+ goto drop;
/*
* A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
- ibp->n_pkt_drops++;
- return;
+ goto drop;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06b5e8..66208bcd7c1 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
kmem_cache_free(pq->pkt_slab, pkt);
}
+ INIT_LIST_HEAD(list);
}
/*
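
The one-line qib_user_sdma_free_pkt_list() fix re-initializes the list head after the entries are freed, so the emptied list is safe to traverse or free again. Kernel-style sketch (struct item and the kfree() usage are illustrative):

struct item {
        struct list_head node;
        /* payload ... */
};

static void free_all(struct list_head *list)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, list, node)
                kfree(it);
        /* leave a valid empty list, not dangling next/prev pointers */
        INIT_LIST_HEAD(list);
}
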
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c127322..63b22a9a7fe 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@ struct qib_mregion {
int access_flags;
u32 max_segs; /* number of qib_segs in all the arrays */
u32 mapsz; /* size of the map array */
+ u8 page_shift; /* 0 - non-uniform or non-power-of-2 page sizes */
atomic_t refcount;
struct qib_segarray *map[0]; /* the segments */
};
@@ -435,7 +436,6 @@ struct qib_qp {
spinlock_t r_lock; /* used for APM */
spinlock_t s_lock;
atomic_t s_dma_busy;
- unsigned processor_id; /* Processor ID QP is bound to */
u32 s_flags;
u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
@@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq;
*/
static inline void qib_schedule_send(struct qib_qp *qp)
{
- if (qib_send_ok(qp)) {
- if (qp->processor_id == smp_processor_id())
- queue_work(qib_wq, &qp->s_work);
- else
- queue_work_on(qp->processor_id,
- qib_wq, &qp->s_work);
- }
+ if (qib_send_ok(qp))
+ queue_work(qib_wq, &qp->s_work);
}
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f1..55855eeabae 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
config INFINIBAND_IPOIB
tristate "IP-over-InfiniBand"
depends on NETDEVICES && INET && (IPV6 || IPV6=n)
- select INET_LRO
---help---
Support for the IP-over-InfiniBand protocol (IPoIB). This
transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fd..ab97f92fc25 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
/* constants */
@@ -100,9 +100,6 @@ enum {
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MAX_LRO_DESCRIPTORS = 8,
- IPOIB_LRO_MAX_AGGR = 64,
-
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
};
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
u16 max_coalesced_frames;
};
-struct ipoib_lro {
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
int hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
-
- struct ipoib_lro lro;
};
struct ipoib_ah {
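
With the private LRO manager gone, the receive path can hand packets straight to the core's GRO engine from NAPI context; a sketch of the resulting shape (the helper name is illustrative):

static void rx_one(struct napi_struct *napi, struct sk_buff *skb,
                   bool csum_ok)
{
        if (csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        /* core GRO replaces lro_receive_skb(); no driver descriptors */
        napi_gro_receive(napi, skb);
}
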
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114de..c1c49f2d35b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ priv->dev->features |= NETIF_F_GRO;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
dev->features |= NETIF_F_TSO;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82ed..19f7f5206f7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
- "LRO aggregated", "LRO flushed",
- "LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
- break;
- }
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ipoib_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, uint64_t *data)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int index = 0;
-
- /* Get LRO statistics */
- data[index++] = priv->lro.lro_mgr.stats.aggregated;
- data[index++] = priv->lro.lro_mgr.stats.flushed;
- if (priv->lro.lro_mgr.stats.flushed)
- data[index++] = priv->lro.lro_mgr.stats.aggregated /
- priv->lro.lro_mgr.stats.flushed;
- else
- data[index++] = 0;
- data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_rx_csum = ipoib_get_rx_csum,
.set_tso = ipoib_set_tso,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ipoib_set_flags,
- .get_strings = ipoib_get_strings,
- .get_sset_count = ipoib_get_sset_count,
- .get_ethtool_stats = ipoib_get_ethtool_stats,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e..806d0292dc3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (dev->features & NETIF_F_LRO)
- lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
- else
- netif_receive_skb(skb);
+ napi_gro_receive(&priv->napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
}
if (done < budget) {
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&priv->lro.lro_mgr);
-
napi_complete(napi);
if (unlikely(ib_req_notify_cq(priv->recv_cq,
IB_CQ_NEXT_COMP |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed9..7a07a728fe0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
- "(default = 64)");
-
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- unsigned int ip_len;
- struct iphdr *iph;
-
- if (unlikely(skb->protocol != htons(ETH_P_IP)))
- return -1;
-
- /*
- * In the future we may add an else clause that verifies the
- * checksum and allows devices which do not calculate checksum
- * to use LRO.
- */
- if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
- return -1;
-
- /* Check for non-TCP packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if IP header and TCP header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
- priv->lro.lro_mgr.max_aggr = lro_max_aggr;
- priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
- priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
- priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
- priv->lro.lro_mgr.features = LRO_F_NAPI;
- priv->lro.lro_mgr.dev = priv->dev;
- priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
static const struct net_device_ops ipoib_netdev_ops = {
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
priv->dev = dev;
- ipoib_lro_setup(priv);
-
spin_lock_init(&priv->lock);
mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
}
- if (lro)
- priv->dev->features |= NETIF_F_LRO;
+ priv->dev->features |= NETIF_F_GRO;
if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a771..4b62105ed1e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
wait_for_completion(&target->done);
}
+static bool srp_change_state(struct srp_target_port *target,
+ enum srp_target_state old,
+ enum srp_target_state new)
+{
+ bool changed = false;
+
+ spin_lock_irq(&target->lock);
+ if (target->state == old) {
+ target->state = new;
+ changed = true;
+ }
+ spin_unlock_irq(&target->lock);
+ return changed;
+}
+
static void srp_remove_work(struct work_struct *work)
{
struct srp_target_port *target =
container_of(work, struct srp_target_port, work);
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state != SRP_TARGET_DEAD) {
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
return;
- }
- target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(target->scsi_host->host_lock);
spin_lock(&target->srp_host->target_lock);
list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scsi_sg_count(scmnd), scmnd->sc_data_direction);
}
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+ struct srp_request *req, s32 req_lim_delta)
{
+ unsigned long flags;
+
srp_unmap_data(req->scmnd, target, req);
- list_move_tail(&req->list, &target->free_reqs);
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += req_lim_delta;
+ req->scmnd = NULL;
+ list_add_tail(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
req->scmnd->result = DID_RESET << 16;
req->scmnd->scsi_done(req->scmnd);
- srp_remove_req(target, req);
+ srp_remove_req(target, req, 0);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_qp_attr qp_attr;
- struct srp_request *req, *tmp;
struct ib_wc wc;
- int ret;
+ int i, ret;
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state != SRP_TARGET_LIVE) {
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
return -EAGAIN;
- }
- target->state = SRP_TARGET_CONNECTING;
- spin_unlock_irq(target->scsi_host->host_lock);
srp_disconnect_target(target);
/*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
; /* nothing */
- spin_lock_irq(target->scsi_host->host_lock);
- list_for_each_entry_safe(req, tmp, &target->req_queue, list)
- srp_reset_req(target, req);
- spin_unlock_irq(target->scsi_host->host_lock);
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ if (req->scmnd)
+ srp_reset_req(target, req);
+ }
- target->rx_head = 0;
- target->tx_head = 0;
- target->tx_tail = 0;
+ INIT_LIST_HEAD(&target->free_tx);
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
+ list_add(&target->tx_ring[i]->list, &target->free_tx);
target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret)
goto err;
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state == SRP_TARGET_CONNECTING) {
- ret = 0;
- target->state = SRP_TARGET_LIVE;
- } else
+ if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
ret = -EAGAIN;
- spin_unlock_irq(target->scsi_host->host_lock);
return ret;
@@ -620,17 +627,20 @@ err:
/*
* We couldn't reconnect, so kill our target port off.
- * However, we have to defer the real removal because we might
- * be in the context of the SCSI error handler now, which
- * would deadlock if we call scsi_remove_host().
+ * However, we have to defer the real removal because we
+ * are in the context of the SCSI error handler now, which
+ * will deadlock if we call scsi_remove_host().
+ *
+ * Schedule our work inside the lock to avoid a race with
+ * the flush_scheduled_work() in srp_remove_one().
*/
- spin_lock_irq(target->scsi_host->host_lock);
+ spin_lock_irq(&target->lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
- spin_unlock_irq(target->scsi_host->host_lock);
+ spin_unlock_irq(&target->lock);
return ret;
}
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(dev->mr->rkey);
+ buf->key = cpu_to_be32(target->rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->desc_list[i].va =
cpu_to_be64(ib_sg_dma_address(ibdev, sg));
buf->desc_list[i].key =
- cpu_to_be32(dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->desc_list[i].len = cpu_to_be32(dma_len);
datalen += dma_len;
}
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->table_desc.va =
cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
buf->table_desc.key =
- cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->table_desc.len =
cpu_to_be32(count * sizeof (struct srp_direct_buf));
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
}
/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and, when applicable, a credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+ enum srp_iu_type iu_type)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&target->lock, flags);
+ list_add(&iu->list, &target->free_tx);
+ if (iu_type != SRP_IU_RSP)
+ ++target->req_lim;
+ spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
*
* Note:
* An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
srp_send_completion(target->send_cq, target);
- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ if (list_empty(&target->free_tx))
return NULL;
/* Initiator responses to target requests do not consume credits */
- if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
- ++target->zero_req_lim;
- return NULL;
+ if (iu_type != SRP_IU_RSP) {
+ if (target->req_lim <= rsv) {
+ ++target->zero_req_lim;
+ return NULL;
+ }
+
+ --target->req_lim;
}
- iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
- iu->type = iu_type;
+ iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+ list_del(&iu->list);
return iu;
}
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
- struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+ struct srp_iu *iu, int len)
{
struct ib_sge list;
struct ib_send_wr wr, *bad_wr;
- int ret = 0;
list.addr = iu->dma;
list.length = len;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
- wr.wr_id = target->tx_head & SRP_SQ_MASK;
+ wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(target->qp, &wr, &bad_wr);
-
- if (!ret) {
- ++target->tx_head;
- if (iu->type != SRP_IU_RSP)
- --target->req_lim;
- }
-
- return ret;
+ return ib_post_send(target->qp, &wr, &bad_wr);
}
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
- unsigned long flags;
- struct srp_iu *iu;
- struct ib_sge list;
struct ib_recv_wr wr, *bad_wr;
- unsigned int next;
- int ret;
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
- next = target->rx_head & SRP_RQ_MASK;
- wr.wr_id = next;
- iu = target->rx_ring[next];
+ struct ib_sge list;
list.addr = iu->dma;
list.length = iu->size;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
+ wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
- ret = ib_post_recv(target->qp, &wr, &bad_wr);
- if (!ret)
- ++target->rx_head;
-
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
- return ret;
+ return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
struct srp_request *req;
struct scsi_cmnd *scmnd;
unsigned long flags;
- s32 delta;
-
- delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
- target->req_lim += delta;
-
- req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
- if (be32_to_cpu(rsp->resp_data_len) < 4)
- req->tsk_status = -1;
- else
- req->tsk_status = rsp->data[3];
- complete(&req->done);
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ target->tsk_mgmt_status = -1;
+ if (be32_to_cpu(rsp->resp_data_len) >= 4)
+ target->tsk_mgmt_status = rsp->data[3];
+ complete(&target->tsk_mgmt_done);
} else {
+ req = &target->req_ring[rsp->tag];
scmnd = req->scmnd;
if (!scmnd)
shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- if (!req->tsk_mgmt) {
- scmnd->host_scribble = (void *) -1L;
- scmnd->scsi_done(scmnd);
-
- srp_remove_req(target, req);
- } else
- req->cmd_done = 1;
+ srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done(scmnd);
}
-
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
void *rsp, int len)
{
- struct ib_device *dev;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
unsigned long flags;
struct srp_iu *iu;
- int err = 1;
+ int err;
- dev = target->srp_host->srp_dev->dev;
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ spin_lock_irqsave(&target->lock, flags);
target->req_lim += req_delta;
-
iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+ spin_unlock_irqrestore(&target->lock, flags);
+
if (!iu) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"no IU available to send response\n");
- goto out;
+ return 1;
}
ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
memcpy(iu->buf, rsp, len);
ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
- err = __srp_post_send(target, iu, len);
- if (err)
+ err = srp_post_send(target, iu, len);
+ if (err) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"unable to post response: %d\n", err);
+ srp_put_tx_iu(target, iu, SRP_IU_RSP);
+ }
-out:
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
return err;
}
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
- struct ib_device *dev;
- struct srp_iu *iu;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
int res;
u8 opcode;
- iu = target->rx_ring[wc->wr_id];
-
- dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
- res = srp_post_recv(target);
+ res = srp_post_recv(target, iu);
if (res != 0)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
+ struct srp_iu *iu;
while (ib_poll_cq(cq, 1, &wc) > 0) {
if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
break;
}
- ++target->tx_tail;
+ iu = (struct srp_iu *) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
}
}
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
- void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
- struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_target_port *target = host_to_target(shost);
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
+ unsigned long flags;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
if (target->state == SRP_TARGET_DEAD ||
target->state == SRP_TARGET_REMOVED) {
scmnd->result = DID_BAD_TARGET << 16;
- done(scmnd);
+ scmnd->scsi_done(scmnd);
return 0;
}
+ spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+ if (iu) {
+ req = list_first_entry(&target->free_reqs, struct srp_request,
+ list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&target->lock, flags);
+
if (!iu)
goto err;
@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
- scmnd->scsi_done = done;
scmnd->result = 0;
- scmnd->host_scribble = (void *) (long) req->index;
+ scmnd->host_scribble = (void *) req;
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
req->scmnd = scmnd;
req->cmd = iu;
- req->cmd_done = 0;
- req->tsk_mgmt = NULL;
len = srp_map_data(scmnd, target, req);
if (len < 0) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "Failed to map data\n");
- goto err;
+ goto err_iu;
}
ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- if (__srp_post_send(target, iu, len)) {
+ if (srp_post_send(target, iu, len)) {
shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
goto err_unmap;
}
- list_move_tail(&req->list, &target->req_queue);
-
return 0;
err_unmap:
srp_unmap_data(scmnd, target, req);
+err_iu:
+ srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+ spin_lock_irqsave(&target->lock, flags);
+ list_add(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
+
err:
return SCSI_MLQUEUE_HOST_BUSY;
}
-static DEF_SCSI_QCMD(srp_queuecommand)
-
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
GFP_KERNEL, DMA_TO_DEVICE);
if (!target->tx_ring[i])
goto err;
+
+ list_add(&target->tx_ring[i]->list, &target->free_tx);
}
return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
break;
for (i = 0; i < SRP_RQ_SIZE; i++) {
- target->status = srp_post_recv(target);
+ struct srp_iu *iu = target->rx_ring[i];
+ target->status = srp_post_recv(target, iu);
if (target->status)
break;
}
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
- struct srp_request *req, u8 func)
+ u64 req_tag, unsigned int lun, u8 func)
{
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- spin_lock_irq(target->scsi_host->host_lock);
-
if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED) {
- req->scmnd->result = DID_BAD_TARGET << 16;
- goto out;
- }
+ target->state == SRP_TARGET_REMOVED)
+ return -1;
- init_completion(&req->done);
+ init_completion(&target->tsk_mgmt_done);
+ spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+ spin_unlock_irq(&target->lock);
+
if (!iu)
- goto out;
+ return -1;
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
- tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
+ tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
tsk_mgmt->tsk_mgmt_func = func;
- tsk_mgmt->task_tag = req->index;
+ tsk_mgmt->task_tag = req_tag;
ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
- if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
- goto out;
-
- req->tsk_mgmt = iu;
-
- spin_unlock_irq(target->scsi_host->host_lock);
-
- if (!wait_for_completion_timeout(&req->done,
- msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+ if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+ srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
return -1;
+ }
- return 0;
-
-out:
- spin_unlock_irq(target->scsi_host->host_lock);
- return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
- struct scsi_cmnd *scmnd,
- struct srp_request **req)
-{
- if (scmnd->host_scribble == (void *) -1L)
+ if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
+ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
return -1;
- *req = &target->req_ring[(long) scmnd->host_scribble];
-
return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req;
+ struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
int ret = SUCCESS;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (target->qp_in_error)
+ if (!req || target->qp_in_error)
return FAILED;
- if (srp_find_req(target, scmnd, &req))
+ if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK))
return FAILED;
- if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
- return FAILED;
-
- spin_lock_irq(target->scsi_host->host_lock);
- if (req->cmd_done) {
- srp_remove_req(target, req);
- scmnd->scsi_done(scmnd);
- } else if (!req->tsk_status) {
- srp_remove_req(target, req);
- scmnd->result = DID_ABORT << 16;
- } else
- ret = FAILED;
-
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (req->scmnd) {
+ if (!target->tsk_mgmt_status) {
+ srp_remove_req(target, req, 0);
+ scmnd->result = DID_ABORT << 16;
+ } else
+ ret = FAILED;
+ }
return ret;
}
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req, *tmp;
+ int i;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
if (target->qp_in_error)
return FAILED;
- if (srp_find_req(target, scmnd, &req))
+ if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+ SRP_TSK_LUN_RESET))
return FAILED;
- if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+ if (target->tsk_mgmt_status)
return FAILED;
- if (req->tsk_status)
- return FAILED;
-
- spin_lock_irq(target->scsi_host->host_lock);
- list_for_each_entry_safe(req, tmp, &target->req_queue, list)
- if (req->scmnd->device == scmnd->device)
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ if (req->scmnd && req->scmnd->device == scmnd->device)
srp_reset_req(target, req);
-
- spin_unlock_irq(target->scsi_host->host_lock);
+ }
return SUCCESS;
}
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
+ target->lkey = host->srp_dev->mr->lkey;
+ target->rkey = host->srp_dev->mr->rkey;
+ spin_lock_init(&target->lock);
+ INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
- INIT_LIST_HEAD(&target->req_queue);
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
target->req_ring[i].index = i;
list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device)
*/
spin_lock(&host->target_lock);
list_for_each_entry(target, &host->target_list, list) {
- spin_lock_irq(target->scsi_host->host_lock);
+ spin_lock_irq(&target->lock);
target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(target->scsi_host->host_lock);
+ spin_unlock_irq(&target->lock);
}
spin_unlock(&host->target_lock);
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void)
{
int ret;
- BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
- BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
if (srp_sg_tablesize > 255) {
printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
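
Throughout the SRP rewrite the 64-bit work-request ID now carries the IU pointer itself rather than a ring index; the new BUILD_BUG_ON() above guards that wr_id is wide enough for a pointer. The round-trip, in isolation:

#include <stdint.h>

struct srp_iu; /* opaque here */

static uint64_t iu_to_wr_id(struct srp_iu *iu)
{
        return (uintptr_t) iu;
}

static struct srp_iu *wr_id_to_iu(uint64_t wr_id)
{
        return (struct srp_iu *) (uintptr_t) wr_id;
}
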
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479..9dc6fc3fd89 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
- SRP_RQ_MASK = SRP_RQ_SIZE - 1,
SRP_SQ_SIZE = SRP_RQ_SIZE,
- SRP_SQ_MASK = SRP_SQ_SIZE - 1,
SRP_RSP_SQ_SIZE = 1,
SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
- SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
+ SRP_TAG_NO_REQ = ~0U,
+ SRP_TAG_TSK_MGMT = 1U << 31,
SRP_FMR_SIZE = 256,
SRP_FMR_POOL_SIZE = 1024,
@@ -113,15 +112,29 @@ struct srp_request {
struct list_head list;
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- struct srp_iu *tsk_mgmt;
struct ib_pool_fmr *fmr;
- struct completion done;
short index;
- u8 cmd_done;
- u8 tsk_status;
};
struct srp_target_port {
+ /* These are RW in the hot path, and commonly used together */
+ struct list_head free_tx;
+ struct list_head free_reqs;
+ spinlock_t lock;
+ s32 req_lim;
+
+ /* These are read-only in the hot path */
+ struct ib_cq *send_cq ____cacheline_aligned_in_smp;
+ struct ib_cq *recv_cq;
+ struct ib_qp *qp;
+ u32 lkey;
+ u32 rkey;
+ enum srp_target_state state;
+
+ /* Everything above this point is used in the hot path of
+ * command processing. Try to keep them packed into cachelines.
+ */
+
__be64 id_ext;
__be64 ioc_guid;
__be64 service_id;
@@ -138,24 +151,13 @@ struct srp_target_port {
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *recv_cq;
- struct ib_cq *send_cq;
- struct ib_qp *qp;
int max_ti_iu_len;
- s32 req_lim;
int zero_req_lim;
- unsigned rx_head;
- struct srp_iu *rx_ring[SRP_RQ_SIZE];
-
- unsigned tx_head;
- unsigned tx_tail;
struct srp_iu *tx_ring[SRP_SQ_SIZE];
-
- struct list_head free_reqs;
- struct list_head req_queue;
+ struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct work_struct work;
@@ -163,16 +165,18 @@ struct srp_target_port {
struct list_head list;
struct completion done;
int status;
- enum srp_target_state state;
int qp_in_error;
+
+ struct completion tsk_mgmt_done;
+ u8 tsk_mgmt_status;
};
struct srp_iu {
+ struct list_head list;
u64 dma;
void *buf;
size_t size;
enum dma_data_direction direction;
- enum srp_iu_type type;
};
#endif /* IB_SRP_H */
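
The reordered srp_target_port above groups the fields that are written together in the hot path and starts the read-mostly fields on a fresh cache line. The layout idea in miniature (kernel-style sketch, not the driver's struct):

struct hot_cold {
        /* RW in the hot path: writers share one cache line */
        spinlock_t lock;
        struct list_head free_list;
        s32 credits;

        /* read-only in the hot path: keep off the writers' line */
        u32 lkey ____cacheline_aligned_in_smp;
        u32 rkey;
};
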
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc09d7..4daf9e5a773 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
/* Set the DMA ops to the ones from the PCI device, this could be
* fishy if we didn't know that on PowerMac it's always direct ops
* or iommu ops that will work fine
+ *
+ * To get all the fields, copy all archdata
*/
- dev->ofdev.dev.archdata.dma_ops =
- chip->lbus.pdev->dev.archdata.dma_ops;
- dev->ofdev.dev.archdata.dma_data =
- chip->lbus.pdev->dev.archdata.dma_data;
+ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
#endif /* CONFIG_PCI */
#ifdef DEBUG
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 44549272333..2e041fd0a00 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2213,6 +2213,9 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
{
state = state_detached;
+ of_dev = dev;
+
+ dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
/* Lookup the fans in the device tree */
fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@ static const struct of_device_id fcu_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, fcu_match);
static struct of_platform_driver fcu_of_platform_driver =
{
@@ -2252,8 +2256,6 @@ static struct of_platform_driver fcu_of_platform_driver =
*/
static int __init therm_pm72_init(void)
{
- struct device_node *np;
-
rackmac = of_machine_is_compatible("RackMac3,1");
if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@ static int __init therm_pm72_init(void)
!rackmac)
return -ENODEV;
- printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
- np = of_find_node_by_type(NULL, "fcu");
- if (np == NULL) {
- /* Some machines have strangely broken device-tree */
- np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
- if (np == NULL) {
- printk(KERN_ERR "Can't find FCU in device-tree !\n");
- return -ENODEV;
- }
- }
- of_dev = of_platform_device_create(np, "temperature", NULL);
- if (of_dev == NULL) {
- printk(KERN_ERR "Can't register FCU platform device !\n");
- return -ENODEV;
- }
-
- of_register_platform_driver(&fcu_of_platform_driver);
-
- return 0;
+ return of_register_platform_driver(&fcu_of_platform_driver);
}
static void __exit therm_pm72_exit(void)
{
of_unregister_platform_driver(&fcu_of_platform_driver);
-
- if (of_dev)
- of_device_unregister(of_dev);
}
module_init(therm_pm72_init);
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f93af9..0a7df44a93c 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+ /*
+ * All SDHI blocks support SDIO IRQ signalling.
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416b9ef..2a876c4099c 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@ config MMC_BLOCK
config MMC_BLOCK_MINORS
int "Number of minors per block device"
+ depends on MMC_BLOCK
range 4 256
default 8
help
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef..ef103871517 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
This option sets a default which can be overridden by the
module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+ bool "MMC host clock gating (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This will attempt to aggressively gate the clock to the MMC card.
+ This saves power by gating off the clock logic and avoiding bus
+ noise when the MMC card is not in use. Your host driver has to
+ support handling this in order for it to be of any use.
+
+ If unsure, say N.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a2a31..63667a8f140 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@ int mmc_add_card(struct mmc_card *card)
type, card->rca);
}
- ret = device_add(&card->dev);
- if (ret)
- return ret;
-
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ ret = device_add(&card->dev);
+ if (ret)
+ return ret;
+
mmc_card_set_present(card);
return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780faf85..6625c057be0 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
+
+ mmc_host_clk_gate(host);
}
}
@@ -190,6 +193,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
+ mmc_host_clk_ungate(host);
host->ops->request(host, mrq);
}
@@ -295,8 +299,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
- timeout_us += data->timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
+ if (mmc_host_clk_rate(card->host))
+ timeout_us += data->timeout_clks * 1000 /
+ (mmc_host_clk_rate(card->host) / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
@@ -614,6 +619,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
+ if (ios->clock > 0)
+ mmc_set_ungated(host);
host->ops->set_ios(host, ios);
}
@@ -641,6 +648,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_old = host->ios.clock;
+ host->ios.clock = 0;
+ host->clk_gated = true;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+ /*
+ * We should previously have gated the clock, so the clock shall
+ * be 0 here! The clock may however be 0 during initialization,
+ * when some request operations are performed before setting
+ * the frequency. When ungate is requested in that situation
+ * we just ignore the call.
+ */
+ if (host->clk_old) {
+ BUG_ON(host->ios.clock);
+ /* This call will also set host->clk_gated to false */
+ mmc_set_clock(host, host->clk_old);
+ }
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /*
+ * We've been given a new frequency while the clock is gated,
+ * so make sure we regard this as ungating it.
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_gated = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
@@ -1424,35 +1486,57 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+ host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+#endif
+ mmc_power_up(host);
+ sdio_reset(host);
+ mmc_go_idle(host);
+
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ /* Order's important: probe SDIO, then SD, then MMC */
+ if (!mmc_attach_sdio(host))
+ return 0;
+ if (!mmc_attach_sd(host))
+ return 0;
+ if (!mmc_attach_mmc(host))
+ return 0;
+
+ mmc_power_off(host);
+ return -EIO;
+}
+
void mmc_rescan(struct work_struct *work)
{
+ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
- u32 ocr;
- int err;
- unsigned long flags;
int i;
- const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
-
- spin_lock_irqsave(&host->lock, flags);
- if (host->rescan_disable) {
- spin_unlock_irqrestore(&host->lock, flags);
+ if (host->rescan_disable)
return;
- }
-
- spin_unlock_irqrestore(&host->lock, flags);
-
mmc_bus_get(host);
- /* if there is a card registered, check whether it is still present */
- if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+ /*
+ * if there is a _removable_ card registered, check whether it is
+ * still present
+ */
+ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+ && mmc_card_is_removable(host))
host->bus_ops->detect(host);
+ /*
+ * Let mmc_bus_put() free the bus/bus_ops if we've found that
+ * the card is no longer present.
+ */
mmc_bus_put(host);
-
-
mmc_bus_get(host);
/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@ void mmc_rescan(struct work_struct *work)
goto out;
}
- /* detect a newly inserted card */
-
/*
* Only we can add a new handler, so it's safe to
* release the lock here.
@@ -1472,72 +1554,16 @@ void mmc_rescan(struct work_struct *work)
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
+ mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- mmc_claim_host(host);
-
- if (freqs[i] >= host->f_min)
- host->f_init = freqs[i];
- else if (!i || freqs[i-1] > host->f_min)
- host->f_init = host->f_min;
- else {
- mmc_release_host(host);
- goto out;
- }
-#ifdef CONFIG_MMC_DEBUG
- pr_info("%s: %s: trying to init card at %u Hz\n",
- mmc_hostname(host), __func__, host->f_init);
-#endif
- mmc_power_up(host);
- sdio_reset(host);
- mmc_go_idle(host);
-
- mmc_send_if_cond(host, host->ocr_avail);
-
- /*
- * First we search for SDIO...
- */
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sdio(host, ocr)) {
- mmc_claim_host(host);
- /*
- * Try SDMEM (but not MMC) even if SDIO
- * is broken.
- */
- if (mmc_send_app_op_cond(host, 0, &ocr))
- goto out_fail;
-
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- }
- goto out;
- }
-
- /*
- * ...then normal SD...
- */
- err = mmc_send_app_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
- /*
- * ...and finally MMC.
- */
- err = mmc_send_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_mmc(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
-out_fail:
- mmc_release_host(host);
- mmc_power_off(host);
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ break;
+ if (freqs[i] < host->f_min)
+ break;
}
-out:
+ mmc_release_host(host);
+
+ out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
@@ -1721,6 +1747,18 @@ int mmc_resume_host(struct mmc_host *host)
if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
mmc_power_up(host);
mmc_select_voltage(host, host->ocr);
+ /*
+ * Tell runtime PM core we just powered up the card,
+ * since it still believes the card is powered off.
+ * Note that currently runtime PM is only enabled
+ * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+ */
+ if (mmc_card_sdio(host->card) &&
+ (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_enable(&host->card->dev);
+ }
}
BUG_ON(!host->bus_ops->resume);
err = host->bus_ops->resume(host);
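
mmc_request_done()/mmc_start_request() above bracket every request with a gate/ungate pair. A simplified sketch of the reference counting behind mmc_host_clk_gate()/mmc_host_clk_ungate() (the deferred work and the MMC-spec 8-cycle delay are omitted):

struct gated_clk {
        spinlock_t lock;
        int requests;   /* in-flight users of the block clock */
        bool gated;
};

static void clk_ref_get(struct gated_clk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        c->requests++;
        if (c->gated)
                c->gated = false; /* restore the cached frequency here */
        spin_unlock_irqrestore(&c->lock, flags);
}

static void clk_ref_put(struct gated_clk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (--c->requests == 0)
                c->gated = true;  /* set ios.clock to 0 here */
        spin_unlock_irqrestore(&c->lock, flags);
}
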
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd11bc..ca1fdde29df 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
/* Module parameters */
extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405fd74..998797ed67a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&mmc_clock_fops))
goto err_node;
+#ifdef CONFIG_MMC_CLKGATE
+ if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+ root, &host->clk_delay))
+ goto err_node;
+#endif
return;
err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af27e03..b3ac6c5bc5c 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "core.h"
#include "host.h"
@@ -50,6 +52,205 @@ void mmc_unregister_host_class(void)
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host once
+ * to ungate and once to gate around each request or card operation.
+ * The driver will see this through set_ios() operations with the
+ * ios.clock field set to 0 to gate (disable) the block clock, and set
+ * back to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+ unsigned long tick_ns;
+ unsigned long freq = host->ios.clock;
+ unsigned long flags;
+
+ if (!freq) {
+ pr_debug("%s: frequency set to 0 in disable function, "
+ "this means the clock is already disabled.\n",
+ mmc_hostname(host));
+ return;
+ }
+ /*
+ * New requests may have appeared while we were scheduling, in
+ * which case there is no reason to delay the check before
+ * clk_disable().
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+
+ /*
+ * Delay n bus cycles (at least 8 from MMC spec) before attempting
+ * to disable the MCI block clock. The reference count may have
+ * gone up again after this delay due to rescheduling!
+ */
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ tick_ns = DIV_ROUND_UP(1000000000, freq);
+ ndelay(host->clk_delay * tick_ns);
+ } else {
+ /* New users appeared while waiting for this work */
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return;
+ }
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ /* This will set host->ios.clock to 0 */
+ mmc_gate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
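Host drivers observe gating purely through set_ios(), as described in the comment at the top of this block. A hypothetical driver's handler might look like this (my_host and my_write_clk_reg are illustrative names, not part of this patch):

	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		struct my_host *host = mmc_priv(mmc);

		if (ios->clock == 0)
			my_write_clk_reg(host, 0);	/* core gated the MCI clock */
		else
			my_write_clk_reg(host, ios->clock);	/* ungated again */
	}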
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+ struct mmc_host *host = container_of(work, struct mmc_host,
+ clk_gate_work);
+
+ mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ * mmc_host_clk_ungate - ungate hardware MCI clocks
+ * @host: host to ungate.
+ *
+ * Makes sure the host ios.clock is restored to a non-zero value
+ * past this call. Increases the clock reference count and ungates
+ * the clock if we are the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_ungate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+ }
+ host->clk_requests++;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_may_gate_card - check if this card may be gated
+ * @card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ /* If there is no card we may gate it */
+ if (!card)
+ return true;
+ /*
+ * Don't gate SDIO cards! They need to be clocked at all times
+ * since they may be independent systems generating interrupts
+ * and other events. The core's clock request counter will drop
+ * to zero since the core itself does not need the clock, but we
+ * still refuse to gate it, because the card may be using it.
+ */
+ if (mmc_card_sdio(card))
+ return false;
+
+ return true;
+}
+
+/**
+ * mmc_host_clk_gate - gate off hardware MCI clocks
+ * @host: host to gate.
+ *
+ * Decreases the clock reference count and, once it reaches zero,
+ * schedules work that gates off the hardware MCI clock by calling
+ * the host driver with ios.clock set to zero.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_requests--;
+ if (mmc_host_may_gate_card(host->card) &&
+ !host->clk_requests)
+ schedule_work(&host->clk_gate_work);
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
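Taken together, the two calls are meant to bracket core activity roughly as follows (hypothetical call site; the real callers live elsewhere in the core):

	mmc_host_clk_ungate(host);	/* refcount 0 -> 1, clock restored if gated */
	/* ... issue the command/data request ... */
	mmc_host_clk_gate(host);	/* refcount 1 -> 0, gating work scheduled */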
+
+/**
+ * mmc_host_clk_rate - get current clock frequency setting
+ * @host: host to get the clock frequency for.
+ *
+ * Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ unsigned long freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated)
+ freq = host->clk_old;
+ else
+ freq = host->ios.clock;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return freq;
+}
+
+/**
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+ host->clk_requests = 0;
+ /* Hold MCI clock for 8 cycles by default */
+ host->clk_delay = 8;
+ host->clk_gated = false;
+ INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ spin_lock_init(&host->clk_lock);
+ mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+ /*
+ * Wait for any outstanding gate and then make sure we're
+ * ungated before exiting.
+ */
+ if (cancel_work_sync(&host->clk_gate_work))
+ mmc_host_clk_gate_delayed(host);
+ if (host->clk_gated)
+ mmc_host_clk_ungate(host);
+ /* There should be only one user now */
+ WARN_ON(host->clk_requests > 1);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
@@ -82,6 +283,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
+ mmc_host_clk_init(host);
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
+
+ mmc_host_clk_exit(host);
}
EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@ void mmc_free_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_free_host);
-
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a3..de199f91192 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+#endif
+
void mmc_host_deeper_disable(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3b880..16006ef153f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
- unsigned ext_csd_bit, bus_width;
-
- if (host->caps & MMC_CAP_8_BIT_DATA) {
- if (ddr)
- ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
- else
- ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
- bus_width = MMC_BUS_WIDTH_8;
- } else {
- if (ddr)
- ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
- else
- ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
- bus_width = MMC_BUS_WIDTH_4;
+ static unsigned ext_csd_bits[][2] = {
+ { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+ { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+ { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1
+ };
+ unsigned idx, bus_width = 0;
+
+ if (host->caps & MMC_CAP_8_BIT_DATA)
+ idx = 0;
+ else
+ idx = 1;
+ for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+ bus_width = bus_widths[idx];
+ if (bus_width == MMC_BUS_WIDTH_1)
+ ddr = 0; /* no DDR for 1-bit width */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx][0]);
+ if (!err) {
+ mmc_set_bus_width_ddr(card->host,
+ bus_width, MMC_SDR_MODE);
+ /*
+ * If controller can't handle bus width test,
+ * use the highest bus width to maintain
+ * compatibility with previous MMC behavior.
+ */
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ break;
+ err = mmc_bus_test(card, bus_width);
+ if (!err)
+ break;
+ }
}
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH, ext_csd_bit);
-
- if (err && err != -EBADMSG)
- goto free_card;
-
+ if (!err && ddr) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx][1]);
+ }
if (err) {
printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
- "failed\n", mmc_hostname(card->host),
- 1 << bus_width, ddr);
- err = 0;
- } else {
- if (ddr)
- mmc_card_set_ddr_mode(card);
- else
- ddr = MMC_SDR_MODE;
-
+ "failed\n", mmc_hostname(card->host),
+ 1 << bus_width, ddr);
+ goto free_card;
+ } else if (ddr) {
+ mmc_card_set_ddr_mode(card);
mmc_set_bus_width_ddr(card->host, bus_width, ddr);
}
}
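To make the walk above concrete: on a host advertising MMC_CAP_8_BIT_DATA and MMC_CAP_BUS_WIDTH_TEST, the loop first switches the card to 8-bit and runs mmc_bus_test(card, MMC_BUS_WIDTH_8); if that fails (for instance because DAT4-DAT7 are not wired on the board), it retries at 4-bit, and finally falls back to 1-bit with DDR forced off. Only once a width has been accepted does the separate switch to the DDR variant of that width take place.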
@@ -737,14 +755,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for MMC card init.
*/
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
{
int err;
+ u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_attach_bus_ops(host);
+ if (host->ocr_avail_mmc)
+ host->ocr_avail = host->ocr_avail_mmc;
/*
* We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
-
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+ mmc_release_host(host);
mmc_remove_card(host->card);
- host->card = NULL;
mmc_claim_host(host);
+ host->card = NULL;
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede..60842f878de 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
return 0;
}
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+ u8 len)
+{
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct scatterlist sg;
+ u8 *data_buf;
+ u8 *test_buf;
+ int i, err;
+ static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+ static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+ /*
+ * DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
+ * bounce buffer for the test pattern instead.
+ */
+ data_buf = kmalloc(len, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ if (len == 8)
+ test_buf = testdata_8bit;
+ else if (len == 4)
+ test_buf = testdata_4bit;
+ else {
+ printk(KERN_ERR "%s: Invalid bus_width %d\n",
+ mmc_hostname(host), len);
+ kfree(data_buf);
+ return -EINVAL;
+ }
+
+ if (opcode == MMC_BUS_TEST_W)
+ memcpy(data_buf, test_buf, len);
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ if (opcode == MMC_BUS_TEST_R)
+ data.flags = MMC_DATA_READ;
+ else
+ data.flags = MMC_DATA_WRITE;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, len);
+ mmc_wait_for_req(host, &mrq);
+ err = 0;
+ if (opcode == MMC_BUS_TEST_R) {
+ for (i = 0; i < len / 4; i++)
+ if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+ err = -EIO;
+ break;
+ }
+ }
+ kfree(data_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+ int err, width;
+
+ if (bus_width == MMC_BUS_WIDTH_8)
+ width = 8;
+ else if (bus_width == MMC_BUS_WIDTH_4)
+ width = 4;
+ else if (bus_width == MMC_BUS_WIDTH_1)
+ return 0; /* no need for test */
+ else
+ return -EINVAL;
+
+ /*
+ * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
+ * is a problem. This improves chances that the test will work.
+ */
+ mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+ err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+ return err;
+}
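A worked instance of the pass condition used by mmc_send_bus_test() above, with the 8-bit pattern (illustrative values):

	u8 sent = 0x55;		/* first byte of testdata_8bit */
	u8 received = 0xaa;	/* a healthy bus returns the complement */

	WARN_ON((sent ^ received) != 0xff);	/* 0x55 ^ 0xaa == 0xff: lines OK */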
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e8417..e6d44b8a18d 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4dffd28..d18c32bca99 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for SD card init.
*/
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
{
int err;
+ u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_sd_attach_bus_ops(host);
+ if (host->ocr_avail_sd)
+ host->ocr_avail = host->ocr_avail_sd;
/*
* We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
ocr &= ~0x7F;
}
- if (ocr & MMC_VDD_165_195) {
+ if ((ocr & MMC_VDD_165_195) &&
+ !(host->ocr_avail_sd & MMC_VDD_165_195)) {
printk(KERN_WARNING "%s: SD card claims to support the "
"incompletely defined 'low voltage range'. This "
"will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
-
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+ mmc_release_host(host);
mmc_remove_card(host->card);
host->card = NULL;
mmc_claim_host(host);
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f94ac4..5c4a54d9b6a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
static int mmc_sdio_resume(struct mmc_host *host)
{
- int i, err;
+ int i, err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
/* Basic card reinitialization. */
mmc_claim_host(host);
- err = mmc_sdio_init_card(host, host->ocr, host->card,
+
+ /* No need to reinitialize powered-resumed nonremovable cards */
+ if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+ err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
+ else if (mmc_card_is_powered_resumed(host)) {
+ /* We may have switched to 1-bit mode during suspend */
+ err = sdio_enable_4bit_bus(host->card);
+ if (err > 0) {
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
+
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -690,16 +702,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
/*
* Starting point for SDIO card init.
*/
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
{
- int err;
- int i, funcs;
+ int err, i, funcs;
+ u32 ocr;
struct mmc_card *card;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_attach_bus(host, &mmc_sdio_ops);
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
/*
* Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
pm_runtime_enable(&card->sdio_func[i]->dev);
}
- mmc_release_host(host);
-
/*
* First add the card to the driver model...
*/
+ mmc_release_host(host);
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_added;
@@ -792,15 +810,17 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
remove_added:
/* Remove without lock if the device has been added. */
+ mmc_release_host(host);
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
/* And with lock if it hasn't been added. */
+ mmc_release_host(host);
if (host->card)
mmc_sdio_remove(host);
+ mmc_claim_host(host);
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da443e33..d29b9c36919 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@ out:
#ifdef CONFIG_PM_RUNTIME
-static int sdio_bus_pm_prepare(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
-
- /*
- * Resume an SDIO device which was suspended at run time at this
- * point, in order to allow standard SDIO suspend/resume paths
- * to keep working as usual.
- *
- * Ultimately, the SDIO driver itself will decide (in its
- * suspend handler, or lack thereof) whether the card should be
- * removed or kept, and if kept, at what power state.
- *
- * At this point, PM core have increased our use count, so it's
- * safe to directly resume the device. After system is resumed
- * again, PM core will drop back its runtime PM use count, and if
- * needed device will be suspended again.
- *
- * The end result is guaranteed to be a power state that is
- * coherent with the device's runtime PM use count.
- *
- * The return value of pm_runtime_resume is deliberately unchecked
- * since there is little point in failing system suspend if a
- * device can't be resumed.
- */
- if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_resume(dev);
-
- return 0;
-}
-
static const struct dev_pm_ops sdio_bus_pm_ops = {
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
- .prepare = sdio_bus_pm_prepare,
};
#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e960a9300eb..c22a4c03998 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -142,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
If unsure, say N.
+config MMC_SDHCI_DOVE
+ bool "SDHCI support on Marvell's Dove SoC"
+ depends on ARCH_DOVE
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface in
+ Marvell's Dove SoC.
+
+ If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+ depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Tegra SD/MMC controller. If you have a Tegra
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,6 +481,22 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
help
If you say yes here SD-Cards may work on the EZkit.
+config MMC_DW
+ tristate "Synopsys DesignWare Memory Card Interface"
+ depends on ARM
+ help
+ This selects support for the Synopsys DesignWare Mobile Storage IP
+ block; it provides host support for SD and MMC interfaces, in both
+ PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+ bool "Internal DMAC interface"
+ depends on MMC_DW
+ help
+ This selects support for the internal DMAC block within the Synopsys
+ DesignWare Mobile Storage IP block. This disables the external DMA
+ interface.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff43b3..e834fb223e9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +40,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
sdhci-platform-y := sdhci-pltfm.o
sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547cf701..0076c7448fe 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
#define DAVINCI_MMCBLNC 0x60
#define DAVINCI_SDIOCTL 0x64
#define DAVINCI_SDIOST0 0x68
-#define DAVINCI_SDIOEN 0x6C
-#define DAVINCI_SDIOST 0x70
+#define DAVINCI_SDIOIEN 0x6C
+#define DAVINCI_SDIOIST 0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT BIT(0)
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
unsigned int mmc_input_clk;
void __iomem *base;
struct resource *mem_res;
- int irq;
+ int mmc_irq, sdio_irq;
unsigned char bus_mode;
#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
u32 rxdma, txdma;
bool use_dma;
bool do_dma;
+ bool sdio_int;
/* Scatterlist DMA uses one or more parameter RAM entries:
* the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
struct scatterlist *sg;
unsigned sg_len;
unsigned bytes_left = host->bytes_left;
- const unsigned shift = ffs(rw_threshold) - 1;;
+ const unsigned shift = ffs(rw_threshold) - 1;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
host->data = NULL;
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ /*
+ * SDIO Interrupt Detection work-around as suggested by
+ * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+ * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+ */
+ if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+ SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ }
+
if (host->do_dma) {
davinci_abort_dma(host);
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
mmc_davinci_reset_ctrl(host, 0);
}
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = dev_id;
+ unsigned int status;
+
+ status = readl(host->base + DAVINCI_SDIOIST);
+ if (status & SDIOIST_IOINT) {
+ dev_dbg(mmc_dev(host->mmc),
+ "SDIO interrupt status %x\n", status);
+ writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ return IRQ_HANDLED;
+}
+
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
return config->get_ro(pdev->id);
}
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ if (enable) {
+ if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ } else {
+ host->sdio_int = true;
+ writel(readl(host->base + DAVINCI_SDIOIEN) |
+ SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+ }
+ } else {
+ host->sdio_int = false;
+ writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+ host->base + DAVINCI_SDIOIEN);
+ }
+}
+
static struct mmc_host_ops mmc_davinci_ops = {
.request = mmc_davinci_request,
.set_ios = mmc_davinci_set_ios,
.get_cd = mmc_davinci_get_cd,
.get_ro = mmc_davinci_get_ro,
+ .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->nr_sg = MAX_NR_SG;
host->use_dma = use_dma;
- host->irq = irq;
+ host->mmc_irq = irq;
+ host->sdio_irq = platform_get_irq(pdev, 1);
if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
if (ret)
goto out;
+ if (host->sdio_irq >= 0) {
+ ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+ mmc_hostname(mmc), host);
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ }
+
rename_region(mem, mmc_hostname(mmc));
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
mmc_davinci_cpufreq_deregister(host);
mmc_remove_host(host->mmc);
- free_irq(host->irq, host);
+ free_irq(host->mmc_irq, host);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ free_irq(host->sdio_irq, host);
davinci_release_dma_channels(host);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 00000000000..2fcc82577c1
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+ SDMMC_INT_HTO | SDMMC_INT_SBE | \
+ SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+ SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
+ DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS 1
+#define DW_MCI_RECV_STATUS 2
+#define DW_MCI_DMA_THRESHOLD 16
+
+#ifdef CONFIG_MMC_DW_IDMAC
+struct idmac_desc {
+ u32 des0; /* Control Descriptor */
+#define IDMAC_DES0_DIC BIT(1)
+#define IDMAC_DES0_LD BIT(2)
+#define IDMAC_DES0_FD BIT(3)
+#define IDMAC_DES0_CH BIT(4)
+#define IDMAC_DES0_ER BIT(5)
+#define IDMAC_DES0_CES BIT(30)
+#define IDMAC_DES0_OWN BIT(31)
+
+ u32 des1; /* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+ ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+
+ u32 des2; /* buffer 1 physical address */
+
+ u32 des3; /* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
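A quick illustration of the IDMAC_SET_BUFFER1_SIZE() helper above: the low 14 bits of des1 hold the buffer 1 length, and the mask preserves the buffer 2 size bits (25:14):

	struct idmac_desc desc = { 0 };

	IDMAC_SET_BUFFER1_SIZE(&desc, 4096);	/* desc.des1 & 0x3fff == 4096 */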
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+ struct mmc_host *mmc;
+ struct dw_mci *host;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned long flags;
+#define DW_MMC_CARD_PRESENT 0
+#define DW_MMC_CARD_NEED_INIT 1
+ int id;
+ int last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+ struct dw_mci_slot *slot = s->private;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_bh(&slot->host->lock);
+ mrq = slot->mrq;
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+ cmd->resp[3], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+ stop->resp[3], stop->error);
+ }
+
+ spin_unlock_bh(&slot->host->lock);
+
+ return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+ seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
+ seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
+ seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
+ seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
+ seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
+ seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+
+ return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+ struct dw_mci *host = slot->host;
+ struct dentry *root;
+ struct dentry *node;
+
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &dw_mci_regs_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_file("req", S_IRUSR, root, slot,
+ &dw_mci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void dw_mci_set_timeout(struct dw_mci *host)
+{
+ /* timeout (maximum) */
+ mci_writel(host, TMOUT, 0xffffffff);
+}
+
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ u32 cmdr;
+ cmd->error = -EINPROGRESS;
+
+ cmdr = cmd->opcode;
+
+ if (cmdr == MMC_STOP_TRANSMISSION)
+ cmdr |= SDMMC_CMD_STOP;
+ else
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ /* We expect a response, so set this bit */
+ cmdr |= SDMMC_CMD_RESP_EXP;
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= SDMMC_CMD_RESP_LONG;
+ }
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdr |= SDMMC_CMD_RESP_CRC;
+
+ data = cmd->data;
+ if (data) {
+ cmdr |= SDMMC_CMD_DAT_EXP;
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= SDMMC_CMD_STRM_MODE;
+ if (data->flags & MMC_DATA_WRITE)
+ cmdr |= SDMMC_CMD_DAT_WR;
+ }
+
+ return cmdr;
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+ struct mmc_command *cmd, u32 cmd_flags)
+{
+ host->cmd = cmd;
+ dev_vdbg(&host->pdev->dev,
+ "start command: ARGR=0x%08x CMDR=0x%08x\n",
+ cmd->arg, cmd_flags);
+
+ mci_writel(host, CMDARG, cmd->arg);
+ wmb();
+
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+{
+ dw_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+ if (host->use_dma) {
+ host->dma_ops->stop(host);
+ host->dma_ops->cleanup(host);
+ } else {
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ }
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+ u32 temp;
+
+ /* Disable and reset the IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_USE_IDMAC;
+ temp |= SDMMC_CTRL_DMA_RESET;
+ mci_writel(host, CTRL, temp);
+
+ /* Stop the IDMAC running */
+ temp = mci_readl(host, BMOD);
+ temp &= ~SDMMC_IDMAC_ENABLE;
+ mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+ host->dma_ops->cleanup(host);
+
+ /*
+ * If the card was removed, data will be NULL. No point in trying to
+ * send the stop command or waiting for NBUSY in this case.
+ */
+ if (data) {
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+ unsigned int sg_len)
+{
+ int i;
+ struct idmac_desc *desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++, desc++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+ u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+ /* Set the OWN bit and disable interrupts for this descriptor */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+ /* Physical address to DMA to/from */
+ desc->des2 = mem_addr;
+ }
+
+ /* Set first descriptor */
+ desc = host->sg_cpu;
+ desc->des0 |= IDMAC_DES0_FD;
+
+ /* Set last descriptor */
+ desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+ desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc->des0 |= IDMAC_DES0_LD;
+
+ wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+ u32 temp;
+
+ dw_mci_translate_sglist(host, host->data, sg_len);
+
+ /* Select IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_USE_IDMAC;
+ mci_writel(host, CTRL, temp);
+
+ wmb();
+
+ /* Enable the IDMAC */
+ temp = mci_readl(host, BMOD);
+ temp |= SDMMC_IDMAC_ENABLE;
+ mci_writel(host, BMOD, temp);
+
+ /* Start it running */
+ mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+ struct idmac_desc *p;
+ int i;
+
+ /* Number of descriptors in the ring buffer */
+ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+ /* Forward link the descriptor list */
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+ p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+ /* Set the last descriptor as the end-of-ring descriptor */
+ p->des3 = host->sg_dma;
+ p->des0 = IDMAC_DES0_ER;
+
+ /* Mask out interrupts - get Tx & Rx complete only */
+ mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+ SDMMC_IDMAC_INT_TI);
+
+ /* Set the descriptor base address */
+ mci_writel(host, DBADDR, host->sg_dma);
+ return 0;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+ .complete = dw_mci_idmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
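The dma_ops indirection above lets an external DMA engine plug into the same five hooks without touching the core driver; a skeleton (all my_* names hypothetical):

	static struct dw_mci_dma_ops my_ext_dma_ops = {
		.init		= my_ext_dma_init,
		.start		= my_ext_dma_start,
		.stop		= my_ext_dma_stop,
		.complete	= my_ext_dma_complete,
		.cleanup	= my_ext_dma_cleanup,
	};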
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ unsigned int i, direction, sg_len;
+ u32 temp;
+
+ /* If we don't have a channel, we can't do DMA */
+ if (!host->use_dma)
+ return -ENODEV;
+
+ /*
+ * We don't do DMA on "complex" transfers, i.e. with
+ * non-word-aligned buffers or lengths. Also, we don't bother
+ * with all the DMA setup overhead for short transfers.
+ */
+ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+ return -EINVAL;
+ if (data->blksz & 3)
+ return -EINVAL;
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3)
+ return -EINVAL;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+ direction);
+
+ dev_vdbg(&host->pdev->dev,
+ "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+ (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+ sg_len);
+
+ /* Enable the DMA interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+
+ /* Disable RX/TX IRQs, let DMA handle it */
+ temp = mci_readl(host, INTMASK);
+ temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+ mci_writel(host, INTMASK, temp);
+
+ host->dma_ops->start(host, sg_len);
+
+ return 0;
+}
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+ u32 temp;
+
+ data->error = -EINPROGRESS;
+
+ WARN_ON(host->data);
+ host->sg = NULL;
+ host->data = data;
+
+ if (dw_mci_submit_data_dma(host, data)) {
+ host->sg = data->sg;
+ host->pio_offset = 0;
+ if (data->flags & MMC_DATA_READ)
+ host->dir_status = DW_MCI_RECV_STATUS;
+ else
+ host->dir_status = DW_MCI_SEND_STATUS;
+
+ temp = mci_readl(host, INTMASK);
+ temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+ mci_writel(host, INTMASK, temp);
+
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+ }
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+ struct dw_mci *host = slot->host;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int cmd_status = 0;
+
+ mci_writel(host, CMDARG, arg);
+ wmb();
+ mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+ while (time_before(jiffies, timeout)) {
+ cmd_status = mci_readl(host, CMD);
+ if (!(cmd_status & SDMMC_CMD_START))
+ return;
+ }
+ dev_err(&slot->mmc->class_dev,
+ "Timeout sending command (cmd %#x arg %#x status %#x)\n",
+ cmd, arg, cmd_status);
+}
+
+static void dw_mci_setup_bus(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ u32 div;
+
+ if (slot->clock != host->current_speed) {
+ if (host->bus_hz % slot->clock)
+ /*
+ * move the + 1 after the divide to prevent
+ * over-clocking the card.
+ */
+ div = ((host->bus_hz / slot->clock) >> 1) + 1;
+ else
+ div = (host->bus_hz / slot->clock) >> 1;
+
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
+ " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+ div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+ /* disable clock */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ /* set clock to desired speed */
+ mci_writel(host, CLKDIV, div);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ /* enable clock */
+ mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ host->current_speed = slot->clock;
+ }
+
+ /* Set the current slot bus width */
+ mci_writel(host, CTYPE, slot->ctype);
+}
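Worked numbers for the divider computation above (illustrative): with host->bus_hz = 50 MHz and slot->clock = 300 kHz, 50000000 % 300000 is non-zero, so div = ((50000000 / 300000) >> 1) + 1 = (166 >> 1) + 1 = 84. The controller divides by 2 * div, so the card sees 50 MHz / 168, about 297.6 kHz, safely at or below the requested rate; without the + 1 the divider would be 83 and the card would run at roughly 301.2 kHz, slightly over-clocked.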
+
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ u32 cmdflags;
+
+ mrq = slot->mrq;
+ if (host->pdata->select_slot)
+ host->pdata->select_slot(slot->id);
+
+ /* Slot specific timing and width adjustment */
+ dw_mci_setup_bus(slot);
+
+ host->cur_slot = slot;
+ host->mrq = mrq;
+
+ host->pending_events = 0;
+ host->completed_events = 0;
+ host->data_status = 0;
+
+ data = mrq->data;
+ if (data) {
+ dw_mci_set_timeout(host);
+ mci_writel(host, BYTCNT, data->blksz*data->blocks);
+ mci_writel(host, BLKSIZ, data->blksz);
+ }
+
+ cmd = mrq->cmd;
+ cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+ /* this is the first command, send the initialization clock */
+ if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+ cmdflags |= SDMMC_CMD_INIT;
+
+ if (data) {
+ dw_mci_submit_data(host, data);
+ wmb();
+ }
+
+ dw_mci_start_command(host, cmd, cmdflags);
+
+ if (mrq->stop)
+ host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+}
+
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+ struct mmc_request *mrq)
+{
+ dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+ host->state);
+
+ spin_lock_bh(&host->lock);
+ slot->mrq = mrq;
+
+ if (host->state == STATE_IDLE) {
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ list_add_tail(&slot->queue_node, &host->queue);
+ }
+
+ spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ WARN_ON(slot->mrq);
+
+ if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* We don't support multiple blocks of weird lengths. */
+ dw_mci_queue_request(host, slot, mrq);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+
+ /* set default 1 bit mode */
+ slot->ctype = SDMMC_CTYPE_1BIT;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ slot->ctype = SDMMC_CTYPE_1BIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ slot->ctype = SDMMC_CTYPE_4BIT;
+ break;
+ }
+
+ if (ios->clock) {
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
+ }
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+ break;
+ default:
+ break;
+ }
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+ int read_only;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_board *brd = slot->host->pdata;
+
+ /* Use platform get_ro function, else try on board write protect */
+ if (brd->get_ro)
+ read_only = brd->get_ro(slot->id);
+ else
+ read_only =
+ mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+ dev_dbg(&mmc->class_dev, "card is %s\n",
+ read_only ? "read-only" : "read-write");
+
+ return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+ int present;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_board *brd = slot->host->pdata;
+
+ /* Use platform get_cd function, else try onboard card detect */
+ if (brd->get_cd)
+ present = !brd->get_cd(slot->id);
+ else
+ present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+ == 0 ? 1 : 0;
+
+ if (present)
+ dev_dbg(&mmc->class_dev, "card is present\n");
+ else
+ dev_dbg(&mmc->class_dev, "card is not present\n");
+
+ return present;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+ .request = dw_mci_request,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+ __releases(&host->lock)
+ __acquires(&host->lock)
+{
+ struct dw_mci_slot *slot;
+ struct mmc_host *prev_mmc = host->cur_slot->mmc;
+
+ WARN_ON(host->cmd || host->data);
+
+ host->cur_slot->mrq = NULL;
+ host->mrq = NULL;
+ if (!list_empty(&host->queue)) {
+ slot = list_entry(host->queue.next,
+ struct dw_mci_slot, queue_node);
+ list_del(&slot->queue_node);
+ dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+ mmc_hostname(slot->mmc));
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ dev_vdbg(&host->pdev->dev, "list empty\n");
+ host->state = STATE_IDLE;
+ }
+
+ spin_unlock(&host->lock);
+ mmc_request_done(prev_mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+ u32 status = host->cmd_status;
+
+ host->cmd_status = 0;
+
+ /* Read the response from the card (up to 16 bytes) */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = mci_readl(host, RESP0);
+ cmd->resp[2] = mci_readl(host, RESP1);
+ cmd->resp[1] = mci_readl(host, RESP2);
+ cmd->resp[0] = mci_readl(host, RESP3);
+ } else {
+ cmd->resp[0] = mci_readl(host, RESP0);
+ cmd->resp[1] = 0;
+ cmd->resp[2] = 0;
+ cmd->resp[3] = 0;
+ }
+ }
+
+ if (status & SDMMC_INT_RTO)
+ cmd->error = -ETIMEDOUT;
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+ cmd->error = -EILSEQ;
+ else if (status & SDMMC_INT_RESP_ERR)
+ cmd->error = -EIO;
+ else
+ cmd->error = 0;
+
+ if (cmd->error) {
+ /* newer ip versions need a delay between retries */
+ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+ mdelay(20);
+
+ if (cmd->data) {
+ host->data = NULL;
+ dw_mci_stop_dma(host);
+ }
+ }
+}
+
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+ struct dw_mci *host = (struct dw_mci *)priv;
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+ enum dw_mci_state state;
+ enum dw_mci_state prev_state;
+ u32 status;
+
+ spin_lock(&host->lock);
+
+ state = host->state;
+ data = host->data;
+
+ do {
+ prev_state = state;
+
+ switch (state) {
+ case STATE_IDLE:
+ break;
+
+ case STATE_SENDING_CMD:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ cmd = host->cmd;
+ host->cmd = NULL;
+ set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+ dw_mci_command_complete(host, host->mrq->cmd);
+ if (!host->mrq->data || cmd->error) {
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
+ prev_state = state = STATE_SENDING_DATA;
+ /* fall through */
+
+ case STATE_SENDING_DATA:
+ if (test_and_clear_bit(EVENT_DATA_ERROR,
+ &host->pending_events)) {
+ dw_mci_stop_dma(host);
+ if (data->stop)
+ send_stop_cmd(host, data);
+ state = STATE_DATA_ERROR;
+ break;
+ }
+
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+ prev_state = state = STATE_DATA_BUSY;
+ /* fall through */
+
+ case STATE_DATA_BUSY:
+ if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+ &host->pending_events))
+ break;
+
+ host->data = NULL;
+ set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+ status = host->data_status;
+
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ if (status & SDMMC_INT_DTO) {
+ dev_err(&host->pdev->dev,
+ "data timeout error\n");
+ data->error = -ETIMEDOUT;
+ } else if (status & SDMMC_INT_DCRC) {
+ dev_err(&host->pdev->dev,
+ "data CRC error\n");
+ data->error = -EILSEQ;
+ } else {
+ dev_err(&host->pdev->dev,
+ "data FIFO error "
+ "(status=%08x)\n",
+ status);
+ data->error = -EIO;
+ }
+ } else {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
+ }
+
+ if (!data->stop) {
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
+ prev_state = state = STATE_SENDING_STOP;
+ if (!data->error)
+ send_stop_cmd(host, data);
+ /* fall through */
+
+ case STATE_SENDING_STOP:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ host->cmd = NULL;
+ dw_mci_command_complete(host, host->mrq->stop);
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+
+ case STATE_DATA_ERROR:
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ state = STATE_DATA_BUSY;
+ break;
+ }
+ } while (state != prev_state);
+
+ host->state = state;
+unlock:
+ spin_unlock(&host->lock);
+}
+
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ u16 *pdata = (u16 *)buf;
+
+ WARN_ON(cnt % 2 != 0);
+
+ cnt = cnt >> 1;
+ while (cnt > 0) {
+ mci_writew(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ u16 *pdata = (u16 *)buf;
+
+ WARN_ON(cnt % 2 != 0);
+
+ cnt = cnt >> 1;
+ while (cnt > 0) {
+ *pdata++ = mci_readw(host, DATA);
+ cnt--;
+ }
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+ u32 *pdata = (u32 *)buf;
+
+ WARN_ON(cnt % 4 != 0);
+ WARN_ON((unsigned long)pdata & 0x3);
+
+ cnt = cnt >> 2;
+ while (cnt > 0) {
+ mci_writel(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+ u32 *pdata = (u32 *)buf;
+
+ WARN_ON(cnt % 4 != 0);
+ WARN_ON((unsigned long)pdata & 0x3);
+
+ cnt = cnt >> 2;
+ while (cnt > 0) {
+ *pdata++ = mci_readl(host, DATA);
+ cnt--;
+ }
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+ u64 *pdata = (u64 *)buf;
+
+ WARN_ON(cnt % 8 != 0);
+
+ cnt = cnt >> 3;
+ while (cnt > 0) {
+ mci_writeq(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+ u64 *pdata = (u64 *)buf;
+
+ WARN_ON(cnt % 8 != 0);
+
+ cnt = cnt >> 3;
+ while (cnt > 0) {
+ *pdata++ = mci_readq(host, DATA);
+ cnt--;
+ }
+}
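The push/pull variants above are selected according to the FIFO width, and host->data_shift converts the FIFO word count reported by the STATUS register into bytes in the PIO loops below. An illustration (the shift values are assumed from the word sizes, not spelled out in this patch):

	/* 32-bit FIFO => data_shift == 2: 16 FIFO words are 16 << 2 = 64 bytes */
	unsigned int fifo_words = 16;
	unsigned int bytes = fifo_words << 2;	/* 64 */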
+
+static void dw_mci_read_data_pio(struct dw_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len, old_len, count = 0;
+
+ do {
+ len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+ if (count == 0)
+ old_len = len;
+
+ if (offset + len <= sg->length) {
+ host->pull_data(host, (void *)(buf + offset), len);
+
+ offset += len;
+ nbytes += len;
+
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
+ host->pull_data(host, (void *)(buf + offset),
+ remaining);
+ nbytes += remaining;
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = len - remaining;
+ buf = sg_virt(sg);
+ host->pull_data(host, buf, offset);
+ nbytes += offset;
+ }
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+ tasklet_schedule(&host->tasklet);
+ return;
+ }
+ count++;
+ } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
+ len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+ return;
+
+done:
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len;
+
+ do {
+ len = SDMMC_FIFO_SZ -
+ (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+ if (offset + len <= sg->length) {
+ host->push_data(host, (void *)(buf + offset), len);
+
+ offset += len;
+ nbytes += len;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
+
+ host->push_data(host, (void *)(buf + offset),
+ remaining);
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = len - remaining;
+ buf = sg_virt(sg);
+ host->push_data(host, (void *)buf, offset);
+ nbytes += offset;
+ }
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+
+ smp_wmb();
+
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+ tasklet_schedule(&host->tasklet);
+ return;
+ }
+ } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+
+ return;
+
+done:
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+ if (!host->cmd_status)
+ host->cmd_status = status;
+
+ smp_wmb();
+
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+ struct dw_mci *host = dev_id;
+ u32 status, pending;
+ unsigned int pass_count = 0;
+
+ do {
+ status = mci_readl(host, RINTSTS);
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+
+ if (!pending)
+ break;
+
+ if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+ mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+ host->cmd_status = status;
+ smp_wmb();
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+			/* if there is an error, report DATA_ERROR */
+ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+ host->data_status = status;
+ smp_wmb();
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_DATA_OVER) {
+ mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+ if (!host->data_status)
+ host->data_status = status;
+ smp_wmb();
+ if (host->dir_status == DW_MCI_RECV_STATUS) {
+ if (host->sg != NULL)
+ dw_mci_read_data_pio(host);
+ }
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_RXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (host->sg)
+ dw_mci_read_data_pio(host);
+ }
+
+ if (pending & SDMMC_INT_TXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (host->sg)
+ dw_mci_write_data_pio(host);
+ }
+
+ if (pending & SDMMC_INT_CMD_DONE) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+ dw_mci_cmd_interrupt(host, status);
+ }
+
+ if (pending & SDMMC_INT_CD) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CD);
+ tasklet_schedule(&host->card_tasklet);
+ }
+
+ } while (pass_count++ < 5);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ /* Handle DMA interrupts */
+ pending = mci_readl(host, IDSTS);
+ if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ host->dma_ops->complete(host);
+ }
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static void dw_mci_tasklet_card(unsigned long data)
+{
+ struct dw_mci *host = (struct dw_mci *)data;
+ int i;
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ struct mmc_host *mmc = slot->mmc;
+ struct mmc_request *mrq;
+ int present;
+ u32 ctrl;
+
+ present = dw_mci_get_cd(mmc);
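+		/* Re-read until the detect state stops changing under us. */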
+ while (present != slot->last_detect_state) {
+ spin_lock(&host->lock);
+
+ dev_dbg(&slot->mmc->class_dev, "card %s\n",
+ present ? "inserted" : "removed");
+
+ /* Card change detected */
+ slot->last_detect_state = present;
+
+ /* Power up slot */
+ if (present != 0) {
+ if (host->pdata->setpower)
+ host->pdata->setpower(slot->id,
+ mmc->ocr_avail);
+
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ }
+
+ /* Clean up queue if present */
+ mrq = slot->mrq;
+ if (mrq) {
+ if (mrq == host->mrq) {
+ host->data = NULL;
+ host->cmd = NULL;
+
+ switch (host->state) {
+ case STATE_IDLE:
+ break;
+ case STATE_SENDING_CMD:
+ mrq->cmd->error = -ENOMEDIUM;
+ if (!mrq->data)
+ break;
+ /* fall through */
+ case STATE_SENDING_DATA:
+ mrq->data->error = -ENOMEDIUM;
+ dw_mci_stop_dma(host);
+ break;
+ case STATE_DATA_BUSY:
+ case STATE_DATA_ERROR:
+ if (mrq->data->error == -EINPROGRESS)
+ mrq->data->error = -ENOMEDIUM;
+ if (!mrq->stop)
+ break;
+ /* fall through */
+ case STATE_SENDING_STOP:
+ mrq->stop->error = -ENOMEDIUM;
+ break;
+ }
+
+ dw_mci_request_end(host, mrq);
+ } else {
+ list_del(&slot->queue_node);
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->data)
+ mrq->data->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+
+ spin_unlock(&host->lock);
+ mmc_request_done(slot->mmc, mrq);
+ spin_lock(&host->lock);
+ }
+ }
+
+ /* Power down slot */
+ if (present == 0) {
+ if (host->pdata->setpower)
+ host->pdata->setpower(slot->id, 0);
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+				/*
+				 * Clear down the FIFO. Doing so generates
+				 * a block interrupt, so clear the
+				 * scatter-gather pointer first to keep the
+				 * PIO handlers away from stale data.
+				 */
+ host->sg = NULL;
+
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= SDMMC_CTRL_FIFO_RESET;
+ mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ ctrl = mci_readl(host, BMOD);
+ ctrl |= 0x01; /* Software reset of DMA */
+ mci_writel(host, BMOD, ctrl);
+#endif
+
+ }
+
+ spin_unlock(&host->lock);
+ present = dw_mci_get_cd(mmc);
+ }
+
+ mmc_detect_change(slot->mmc,
+ msecs_to_jiffies(host->pdata->detect_delay_ms));
+ }
+}
+
+static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+ struct mmc_host *mmc;
+ struct dw_mci_slot *slot;
+
+ mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ slot = mmc_priv(mmc);
+ slot->id = id;
+ slot->mmc = mmc;
+ slot->host = host;
+
+ mmc->ops = &dw_mci_ops;
+ mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+ mmc->f_max = host->bus_hz;
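+	/*
+	 * 510 is presumably 2 * 255: the 8-bit card clock divider divides
+	 * bus_hz by twice its value, making bus_hz / 510 the slowest and
+	 * bus_hz (divider bypassed) the fastest card clock.
+	 */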
+
+ if (host->pdata->get_ocr)
+ mmc->ocr_avail = host->pdata->get_ocr(id);
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ /*
+ * Start with slot power disabled, it will be enabled when a card
+ * is detected.
+ */
+ if (host->pdata->setpower)
+ host->pdata->setpower(id, 0);
+
+ mmc->caps = 0;
+ if (host->pdata->get_bus_wd)
+ if (host->pdata->get_bus_wd(slot->id) >= 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65536;
+ mmc->max_blk_count = host->ring_size;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+#else
+ if (host->pdata->blk_settings) {
+ mmc->max_segs = host->pdata->blk_settings->max_segs;
+ mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+ mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+ mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+ mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+ } else {
+ /* Useful defaults if platform data is unset. */
+ mmc->max_segs = 64;
+ mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+ mmc->max_blk_count = 512;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+ }
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+ if (dw_mci_get_cd(mmc))
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+ host->slot[id] = slot;
+ mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+ dw_mci_init_debugfs(slot);
+#endif
+
+ /* Card initially undetected */
+ slot->last_detect_state = 0;
+
+ return 0;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+ /* Shutdown detect IRQ */
+ if (slot->host->pdata->exit)
+ slot->host->pdata->exit(id);
+
+ /* Debugfs stuff is cleaned up by mmc core */
+ mmc_remove_host(slot->mmc);
+ slot->host->slot[id] = NULL;
+ mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+ /* Alloc memory for sg translation */
+ host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+ __func__);
+ goto no_dma;
+ }
+
+ /* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+ host->dma_ops = &dw_mci_idmac_ops;
+ dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+ if (!host->dma_ops)
+ goto no_dma;
+
+ if (host->dma_ops->init) {
+ if (host->dma_ops->init(host)) {
+ dev_err(&host->pdev->dev, "%s: Unable to initialize "
+ "DMA Controller.\n", __func__);
+ goto no_dma;
+ }
+ } else {
+		dev_err(&host->pdev->dev, "DMA ops supply no init routine.\n");
+ goto no_dma;
+ }
+
+ host->use_dma = 1;
+ return;
+
+no_dma:
+ dev_info(&host->pdev->dev, "Using PIO mode.\n");
+ host->use_dma = 0;
+ return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int ctrl;
+
+ mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET));
+
+ /* wait till resets clear */
+ do {
+ ctrl = mci_readl(host, CTRL);
+ if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET)))
+ return true;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+ return false;
+}
+
+static int dw_mci_probe(struct platform_device *pdev)
+{
+ struct dw_mci *host;
+ struct resource *regs;
+ struct dw_mci_board *pdata;
+ int irq, ret, i, width;
+ u32 fifo_size;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+ host->pdata = pdata = pdev->dev.platform_data;
+ if (!pdata || !pdata->init) {
+ dev_err(&pdev->dev,
+ "Platform data must supply init function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->select_slot && pdata->num_slots > 1) {
+ dev_err(&pdev->dev,
+ "Platform data must supply select_slot function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->bus_hz) {
+ dev_err(&pdev->dev,
+ "Platform data must supply bus speed\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ host->bus_hz = pdata->bus_hz;
+ host->quirks = pdata->quirks;
+
+ spin_lock_init(&host->lock);
+ INIT_LIST_HEAD(&host->queue);
+
+ ret = -ENOMEM;
+ host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ if (!host->regs)
+ goto err_freehost;
+
+ host->dma_ops = pdata->dma_ops;
+ dw_mci_init_dma(host);
+
+ /*
+ * Get the host data width - this assumes that HCON has been set with
+ * the correct values.
+ */
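+	/* Bits [9:7] of HCON encode the FIFO data width: 0 = 16-bit,
+	 * 1 = 32-bit, 2 = 64-bit; anything else is reserved. */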
+ i = (mci_readl(host, HCON) >> 7) & 0x7;
+ if (!i) {
+ host->push_data = dw_mci_push_data16;
+ host->pull_data = dw_mci_pull_data16;
+ width = 16;
+ host->data_shift = 1;
+ } else if (i == 2) {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ width = 64;
+ host->data_shift = 3;
+ } else {
+		/* Warn if HCON reports a reserved data-width value */
+ WARN((i != 1),
+ "HCON reports a reserved host data width!\n"
+ "Defaulting to 32-bit access.\n");
+ host->push_data = dw_mci_push_data32;
+ host->pull_data = dw_mci_pull_data32;
+ width = 32;
+ host->data_shift = 2;
+ }
+
+ /* Reset all blocks */
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ goto err_dmaunmap;
+ }
+
+ /* Clear the interrupts for the host controller */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+	/*
+	 * FIFO threshold settings:
+	 *   RX mark  = fifo_size / 2 - 1
+	 *   TX mark  = fifo_size / 2
+	 *   DMA size = 8
+	 */
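+	/* The shifts below place the DMA burst size at bits [30:28], the RX
+	 * watermark at [27:16] and the TX watermark at [11:0]; the 0x2
+	 * burst-size code corresponds to the 8-transfer DMA size above. */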
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = (fifo_size >> 16) & 0x7ff;
+ mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+ ((fifo_size/2) << 0)));
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+ tasklet_init(&host->card_tasklet,
+ dw_mci_tasklet_card, (unsigned long)host);
+
+ ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
+ if (ret)
+ goto err_dmaunmap;
+
+ platform_set_drvdata(pdev, host);
+
+ if (host->pdata->num_slots)
+ host->num_slots = host->pdata->num_slots;
+ else
+ host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
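+	/* HCON bits [5:1] hold the slot count minus one, hence the "+ 1". */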
+
+ /* We need at least one slot to succeed */
+ for (i = 0; i < host->num_slots; i++) {
+ ret = dw_mci_init_slot(host, i);
+ if (ret) {
+ ret = -ENODEV;
+ goto err_init_slot;
+ }
+ }
+
+	/*
+	 * Enable interrupts for command done, data over, data empty, card
+	 * detect, receive/transmit ready, and errors such as response
+	 * timeout and CRC failure.
+	 */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(&pdev->dev, "DW MMC controller at irq %d, "
+ "%d bit host data width\n", irq, width);
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+ dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+
+ return 0;
+
+err_init_slot:
+ /* De-init any initialized slots */
+ while (i > 0) {
+ if (host->slot[i])
+ dw_mci_cleanup_slot(host->slot[i], i);
+ i--;
+ }
+ free_irq(irq, host);
+
+err_dmaunmap:
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+ dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+ host->sg_cpu, host->sg_dma);
+ iounmap(host->regs);
+
+err_freehost:
+ kfree(host);
+ return ret;
+}
+
+static int __exit dw_mci_remove(struct platform_device *pdev)
+{
+ struct dw_mci *host = platform_get_drvdata(pdev);
+ int i;
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
+
+ platform_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < host->num_slots; i++) {
+ dev_dbg(&pdev->dev, "remove slot %d\n", i);
+ if (host->slot[i])
+ dw_mci_cleanup_slot(host->slot[i], i);
+ }
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
+ iounmap(host->regs);
+
+ kfree(host);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ int i, ret;
+ struct dw_mci *host = platform_get_drvdata(pdev);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_suspend_host(slot->mmc);
+ if (ret < 0) {
+ while (--i >= 0) {
+ slot = host->slot[i];
+ if (slot)
+ mmc_resume_host(host->slot[i]->mmc);
+ }
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw_mci_resume(struct platform_device *pdev)
+{
+ int i, ret;
+ struct dw_mci *host = platform_get_drvdata(pdev);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_resume_host(host->slot[i]->mmc);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define dw_mci_suspend NULL
+#define dw_mci_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver dw_mci_driver = {
+ .remove = __exit_p(dw_mci_remove),
+ .suspend = dw_mci_suspend,
+ .resume = dw_mci_resume,
+ .driver = {
+ .name = "dw_mmc",
+ },
+};
+
+static int __init dw_mci_init(void)
+{
+ return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
+}
+
+static void __exit dw_mci_exit(void)
+{
+ platform_driver_unregister(&dw_mci_driver);
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 00000000000..5dd55a75233
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL 0x000
+#define SDMMC_PWREN 0x004
+#define SDMMC_CLKDIV 0x008
+#define SDMMC_CLKSRC 0x00c
+#define SDMMC_CLKENA 0x010
+#define SDMMC_TMOUT 0x014
+#define SDMMC_CTYPE 0x018
+#define SDMMC_BLKSIZ 0x01c
+#define SDMMC_BYTCNT 0x020
+#define SDMMC_INTMASK 0x024
+#define SDMMC_CMDARG 0x028
+#define SDMMC_CMD 0x02c
+#define SDMMC_RESP0 0x030
+#define SDMMC_RESP1 0x034
+#define SDMMC_RESP2 0x038
+#define SDMMC_RESP3 0x03c
+#define SDMMC_MINTSTS 0x040
+#define SDMMC_RINTSTS 0x044
+#define SDMMC_STATUS 0x048
+#define SDMMC_FIFOTH 0x04c
+#define SDMMC_CDETECT 0x050
+#define SDMMC_WRTPRT 0x054
+#define SDMMC_GPIO 0x058
+#define SDMMC_TCBCNT 0x05c
+#define SDMMC_TBBCNT 0x060
+#define SDMMC_DEBNCE 0x064
+#define SDMMC_USRID 0x068
+#define SDMMC_VERID 0x06c
+#define SDMMC_HCON 0x070
+#define SDMMC_BMOD 0x080
+#define SDMMC_PLDMND 0x084
+#define SDMMC_DBADDR 0x088
+#define SDMMC_IDSTS 0x08c
+#define SDMMC_IDINTEN 0x090
+#define SDMMC_DSCADDR 0x094
+#define SDMMC_BUFADDR 0x098
+#define SDMMC_DATA 0x100
+#define SDMMC_DATA_ADR 0x100
+
+/* shift bit field */
+#define _SBF(f, v) ((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
+#define SDMMC_CTRL_SEND_CCSD BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
+#define SDMMC_CTRL_READ_WAIT BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE BIT(5)
+#define SDMMC_CTRL_INT_ENABLE BIT(4)
+#define SDMMC_CTRL_DMA_RESET BIT(2)
+#define SDMMC_CTRL_FIFO_RESET BIT(1)
+#define SDMMC_CTRL_RESET BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR BIT(16)
+#define SDMMC_CLKEN_ENABLE BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK 0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT BIT(16)
+#define SDMMC_CTYPE_4BIT BIT(0)
+#define SDMMC_CTYPE_1BIT 0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO BIT(16)
+#define SDMMC_INT_EBE BIT(15)
+#define SDMMC_INT_ACD BIT(14)
+#define SDMMC_INT_SBE BIT(13)
+#define SDMMC_INT_HLE BIT(12)
+#define SDMMC_INT_FRUN BIT(11)
+#define SDMMC_INT_HTO BIT(10)
+#define SDMMC_INT_DTO BIT(9)
+#define SDMMC_INT_RTO BIT(8)
+#define SDMMC_INT_DCRC BIT(7)
+#define SDMMC_INT_RCRC BIT(6)
+#define SDMMC_INT_RXDR BIT(5)
+#define SDMMC_INT_TXDR BIT(4)
+#define SDMMC_INT_DATA_OVER BIT(3)
+#define SDMMC_INT_CMD_DONE BIT(2)
+#define SDMMC_INT_RESP_ERR BIT(1)
+#define SDMMC_INT_CD BIT(0)
+#define SDMMC_INT_ERROR 0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START BIT(31)
+#define SDMMC_CMD_CCS_EXP BIT(23)
+#define SDMMC_CMD_CEATA_RD BIT(22)
+#define SDMMC_CMD_UPD_CLK BIT(21)
+#define SDMMC_CMD_INIT BIT(15)
+#define SDMMC_CMD_STOP BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
+#define SDMMC_CMD_SEND_STOP BIT(12)
+#define SDMMC_CMD_STRM_MODE BIT(11)
+#define SDMMC_CMD_DAT_WR BIT(10)
+#define SDMMC_CMD_DAT_EXP BIT(9)
+#define SDMMC_CMD_RESP_CRC BIT(8)
+#define SDMMC_CMD_RESP_LONG BIT(7)
+#define SDMMC_CMD_RESP_EXP BIT(6)
+#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
+/* Status register defines */
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ 32
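+/* STATUS carries the FIFO fill count starting at bit 17; the 9-bit mask
+ * above covers depths up to 511 entries, ample for the 32-entry FIFO. */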
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI BIT(9)
+#define SDMMC_IDMAC_INT_NI BIT(8)
+#define SDMMC_IDMAC_INT_CES BIT(5)
+#define SDMMC_IDMAC_INT_DU BIT(4)
+#define SDMMC_IDMAC_INT_FBE BIT(2)
+#define SDMMC_IDMAC_INT_RI BIT(1)
+#define SDMMC_IDMAC_INT_TI BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE BIT(7)
+#define SDMMC_IDMAC_FB BIT(1)
+#define SDMMC_IDMAC_SWRESET BIT(0)
+
+/* Register access macros */
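+/*
+ * Token pasting (SDMMC_##reg) lets callers name registers without the
+ * SDMMC_ prefix, e.g. mci_readl(host, CTRL) reads SDMMC_CTRL.
+ */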
+#define mci_readl(dev, reg) \
+ __raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value) \
+ __raw_writel((value), dev->regs + SDMMC_##reg)
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg) \
+ __raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value) \
+ __raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg) \
+ __raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value) \
+ __raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * None of these architectures is expected to configure the IP block
+ * with a 64-bit FIFO width, so this code should never execute on such
+ * machines. Defining the macros here keeps the rest of the code free
+ * of ifdefs.
+ */
+#define mci_readq(dev, reg) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb87cb..4428594261c 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <asm/dma.h>
#include <asm/irq.h>
@@ -141,10 +142,49 @@ struct mxcmci_host {
struct work_struct datawork;
spinlock_t lock;
+
+ struct regulator *vcc;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
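+/*
+ * Prefer a "vmmc" regulator for the OCR mask; fall back to platform
+ * data, and finally to a fixed 3.2-3.4 V window, when no regulator
+ * is available.
+ */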
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+ host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+ if (IS_ERR(host->vcc)) {
+ host->vcc = NULL;
+ } else {
+ host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+ if (host->pdata && host->pdata->ocr_avail)
+ dev_warn(mmc_dev(host->mmc),
+ "pdata->ocr_avail will not be used\n");
+ }
+
+ if (host->vcc == NULL) {
+ /* fall-back to platform data */
+ if (host->pdata && host->pdata->ocr_avail)
+ host->mmc->ocr_avail = host->pdata->ocr_avail;
+ else
+ host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+ unsigned char power_mode,
+ unsigned int vdd)
+{
+ if (host->vcc) {
+ if (power_mode == MMC_POWER_UP)
+ mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ else if (power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ }
+
+ if (host->pdata && host->pdata->setpower)
+ host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
return host->do_dma;
@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
if (host->power_mode != ios->power_mode) {
- if (host->pdata && host->pdata->setpower)
- host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+ mxcmci_set_power(host, ios->power_mode, ios->vdd);
host->power_mode = ios->power_mode;
+
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMD_DAT_CONT_INIT;
}
@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->pdata = pdev->dev.platform_data;
spin_lock_init(&host->lock);
- if (host->pdata && host->pdata->ocr_avail)
- mmc->ocr_avail = host->pdata->ocr_avail;
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mxcmci_init_ocr(host);
if (host->pdata && host->pdata->dat3_card_detect)
host->default_irq_mask =
@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
+ if (host->vcc)
+ regulator_put(host->vcc);
+
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev)
clk_put(host->clk);
release_mem_region(host->res->start, resource_size(host->res));
- release_resource(host->res);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 00000000000..2aeef4ffed8
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ * Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+
+ switch (reg) {
+ case SDHCI_HOST_VERSION:
+ case SDHCI_SLOT_INT_STATUS:
+ /* those registers don't exist */
+ return 0;
+ default:
+ ret = readw(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ switch (reg) {
+ case SDHCI_CAPABILITIES:
+ ret = readl(host->ioaddr + reg);
+ /* Mask the support for 3.0V */
+ ret &= ~SDHCI_CAN_VDD_300;
+ break;
+ default:
+ ret = readl(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
+};
+
+struct sdhci_pltfm_data sdhci_dove_pdata = {
+ .ops = &sdhci_dove_ops,
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_FORCE_DMA,
+};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c2460d43..0dc905b20ee 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u8 scratch;
+
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_8220:
+ case PCI_DEVICE_ID_O2_8221:
+ case PCI_DEVICE_ID_O2_8320:
+ case PCI_DEVICE_ID_O2_8321:
+ /* This extra setup is required due to broken ADMA. */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set Multi 3 to VCC3V# */
+ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+ /* Disable CLK_REQ# support after media DET */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+ /* Choose capabilities, enable SDMA. We have to write 0x01
+ * to the capabilities register first to unlock it.
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+ /* Disable ADMA1/2 */
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+ /* Disable the infinite transfer mode */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ }
+
+ return 0;
+}
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
static int jmicron_probe(struct sdhci_pci_chip *chip)
{
int ret;
+ u16 mmcdev = 0;
if (chip->pdev->revision == 0) {
chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
* 2. The MMC interface has a lower subfunction number
* than the SD interface.
*/
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+ else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+ if (mmcdev) {
struct pci_dev *sd_dev;
sd_dev = NULL;
while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
- PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+ mmcdev, sd_dev)) != NULL) {
if ((PCI_SLOT(chip->pdev->devfn) ==
PCI_SLOT(sd_dev->devfn)) &&
(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
}
+ /* JM388 MMC doesn't support 1.8V while SD supports it */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+ slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31 |
+ MMC_VDD_165_195; /* allow 1.8V */
+ slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+ }
+
/*
* The secondary interface requires a bit set to get the
* interrupts.
*/
- if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 1);
+ slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
return 0;
}
@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
if (dead)
return;
- if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 0);
}
@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
{
int i;
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
{
int ret, i;
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 1);
}
@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
return 0;
}
+static const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = o2_probe,
+};
+
static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe,
@@ -510,6 +603,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_SYSKONNECT,
.device = 0x8000,
.subvendor = PCI_ANY_ID,
@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
},
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8120,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8220,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8221,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8320,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8321,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89f662..dbab0407f4b 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
#endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+ { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+ { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
{ },
};
MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48af56..ea2e44d9be5 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@ struct sdhci_pltfm_host {
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8..17203586305 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
if (!clksrc)
return UINT_MAX;
+	/*
+	 * When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL the clock is
+	 * divided outside the host controller in steps of 1 rather than
+	 * by the controller's power-of-two divider, so ask the clock
+	 * framework for the closest rate and return its distance from
+	 * the requested one.
+	 */
+ if (ourhost->pdata->clk_type) {
+ rate = clk_round_rate(clksrc, wanted);
+ return wanted - rate;
+ }
+
rate = clk_get_rate(clksrc);
for (div = 1; div < 256; div *= 2) {
@@ -232,6 +241,42 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
return min;
}
+/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+ return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/*
+	 * The card initialization clock must lie between 100 kHz and
+	 * 400 kHz, so report the rate closest to 400 kHz as the minimum.
+	 */
+ return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change. */
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+ /* don't bother if the clock is going off */
+ if (clock == 0)
+ return;
+
+ sdhci_s3c_set_clock(host, clock);
+
+ clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+ host->clock = clock;
+}
+
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
@@ -361,6 +406,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
clks++;
sc->clk_bus[ptr] = clk;
+
+	/*
+	 * Save the current clock index so the overriding
+	 * callbacks know which clock bus is in use.
+	 */
+ sc->cur_clk = ptr;
+
clk_enable(clk);
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -427,6 +479,20 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+	/*
+	 * If the controller has no usable internal clock divider,
+	 * override the default clock callbacks with the CMU-based ones.
+	 */
+ if (pdata->clk_type) {
+ sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+ }
+
+	/* Merge any additional host capabilities from platform data */
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 00000000000..4823ee94a63
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <mach/gpio.h>
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val;
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ /* Use wp_gpio here instead? */
+ val = readl(host->ioaddr + reg);
+ return val | SDHCI_WRITE_PROTECT;
+ }
+
+ return readl(host->ioaddr + reg);
+}
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+ /* Erratum: Version register is invalid in HW. */
+ return SDHCI_SPEC_200;
+ }
+
+ return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	/* Spurious timeout and CRC errors have been observed, so disable
+	 * signalling of them; real errors will still be caught eventually
+	 * by the software timers.
+	 */
+ if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+ val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+ writel(val, host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_INT_ENABLE)) {
+ /* Erratum: Must enable block gap interrupt detection */
+ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ if (val & SDHCI_INT_CARD_INT)
+ gap_ctrl |= 0x8;
+ else
+ gap_ctrl &= ~0x8;
+ writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ }
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+ struct tegra_sdhci_platform_data *plat;
+
+ plat = pdev->dev.platform_data;
+
+ if (!gpio_is_valid(plat->wp_gpio))
+ return -1;
+
+ return gpio_get_value(plat->wp_gpio);
+}
+
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ tasklet_schedule(&sdhost->card_tasklet);
+ return IRQ_HANDLED;
+};
+
+static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+ u32 ctrl;
+
+ plat = pdev->dev.platform_data;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (bus_width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ return 0;
+}
+
+
+static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
+ struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+ struct clk *clk;
+ int rc;
+
+ plat = pdev->dev.platform_data;
+ if (plat == NULL) {
+ dev_err(mmc_dev(host->mmc), "missing platform data\n");
+ return -ENXIO;
+ }
+
+ if (gpio_is_valid(plat->power_gpio)) {
+ rc = gpio_request(plat->power_gpio, "sdhci_power");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate power gpio\n");
+ goto out;
+ }
+ tegra_gpio_enable(plat->power_gpio);
+ gpio_direction_output(plat->power_gpio, 1);
+ }
+
+ if (gpio_is_valid(plat->cd_gpio)) {
+ rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate cd gpio\n");
+ goto out_power;
+ }
+ tegra_gpio_enable(plat->cd_gpio);
+ gpio_direction_input(plat->cd_gpio);
+
+ rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(host->mmc), host);
+
+ if (rc) {
+ dev_err(mmc_dev(host->mmc), "request irq error\n");
+ goto out_cd;
+ }
+
+ }
+
+ if (gpio_is_valid(plat->wp_gpio)) {
+ rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate wp gpio\n");
+ goto out_cd;
+ }
+ tegra_gpio_enable(plat->wp_gpio);
+ gpio_direction_input(plat->wp_gpio);
+ }
+
+ clk = clk_get(mmc_dev(host->mmc), NULL);
+ if (IS_ERR(clk)) {
+ dev_err(mmc_dev(host->mmc), "clk err\n");
+ rc = PTR_ERR(clk);
+ goto out_wp;
+ }
+ clk_enable(clk);
+ pltfm_host->clk = clk;
+
+ if (plat->is_8bit)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ return 0;
+
+out_wp:
+ if (gpio_is_valid(plat->wp_gpio)) {
+ tegra_gpio_disable(plat->wp_gpio);
+ gpio_free(plat->wp_gpio);
+ }
+
+out_cd:
+ if (gpio_is_valid(plat->cd_gpio)) {
+ tegra_gpio_disable(plat->cd_gpio);
+ gpio_free(plat->cd_gpio);
+ }
+
+out_power:
+ if (gpio_is_valid(plat->power_gpio)) {
+ tegra_gpio_disable(plat->power_gpio);
+ gpio_free(plat->power_gpio);
+ }
+
+out:
+ return rc;
+}
+
+static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+
+ plat = pdev->dev.platform_data;
+
+ if (gpio_is_valid(plat->wp_gpio)) {
+ tegra_gpio_disable(plat->wp_gpio);
+ gpio_free(plat->wp_gpio);
+ }
+
+ if (gpio_is_valid(plat->cd_gpio)) {
+ tegra_gpio_disable(plat->cd_gpio);
+ gpio_free(plat->cd_gpio);
+ }
+
+ if (gpio_is_valid(plat->power_gpio)) {
+ tegra_gpio_disable(plat->power_gpio);
+ gpio_free(plat->power_gpio);
+ }
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .platform_8bit_width = tegra_sdhci_8bit,
+};
+
+struct sdhci_pltfm_data sdhci_tegra_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .ops = &tegra_sdhci_ops,
+ .init = tegra_sdhci_pltfm_init,
+ .exit = tegra_sdhci_pltfm_exit,
+};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db426c91..9e15f41f87b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
#include <linux/leds.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include "sdhci.h"
@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
sdhci_readw(host, SDHCI_ACMD12_ERR),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
+ printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
+ sdhci_readl(host, SDHCI_CAPABILITIES_1));
+ printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT)
host->data->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+ else if (intmask & SDHCI_INT_DATA_END_BIT)
+ host->data->error = -EILSEQ;
+ else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ != MMC_BUS_TEST_R)
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- unsigned int caps;
+ unsigned int caps, ocr_avail;
int ret;
WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
- mmc->ocr_avail = 0;
+ ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
- mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+ ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
if (caps & SDHCI_CAN_VDD_300)
- mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+ ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
if (caps & SDHCI_CAN_VDD_180)
- mmc->ocr_avail |= MMC_VDD_165_195;
+ ocr_avail |= MMC_VDD_165_195;
+
+ mmc->ocr_avail = ocr_avail;
+ mmc->ocr_avail_sdio = ocr_avail;
+ if (host->ocr_avail_sdio)
+ mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+ mmc->ocr_avail_sd = ocr_avail;
+ if (host->ocr_avail_sd)
+ mmc->ocr_avail_sd &= host->ocr_avail_sd;
+ else /* normal SD controllers don't support 1.8V */
+ mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+ mmc->ocr_avail_mmc = ocr_avail;
+ if (host->ocr_avail_mmc)
+ mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host)
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
- if (host->flags & SDHCI_USE_ADMA)
- mmc->max_seg_size = 65536;
- else
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ mmc->max_seg_size = 65535;
+ else
+ mmc->max_seg_size = 65536;
+ } else {
mmc->max_seg_size = mmc->max_req_size;
+ }
/*
* Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f00c06..6e0969e4065 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
#define SDHCI_RESPONSE 0x10
@@ -165,7 +166,7 @@
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000
-/* 44-47 reserved for more caps */
+#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a89593..e3c6ef20839 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
* double buffer support
*
*/
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/device.h>
+
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND 0x00000001
+#define TMIO_STAT_DATAEND 0x00000004
+#define TMIO_STAT_CARD_REMOVE 0x00000008
+#define TMIO_STAT_CARD_INSERT 0x00000010
+#define TMIO_STAT_SIGSTATE 0x00000020
+#define TMIO_STAT_WRPROTECT 0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A 0x00000400
+#define TMIO_STAT_CMD_IDX_ERR 0x00010000
+#define TMIO_STAT_CRCFAIL 0x00020000
+#define TMIO_STAT_STOPBIT_ERR 0x00040000
+#define TMIO_STAT_DATATIMEOUT 0x00080000
+#define TMIO_STAT_RXOVERFLOW 0x00100000
+#define TMIO_STAT_TXUNDERRUN 0x00200000
+#define TMIO_STAT_CMDTIMEOUT 0x00400000
+#define TMIO_STAT_RXRDY 0x01000000
+#define TMIO_STAT_TXRQ 0x02000000
+#define TMIO_STAT_ILL_FUNC 0x20000000
+#define TMIO_STAT_CMD_BUSY 0x40000000
+#define TMIO_STAT_ILL_ACCESS 0x80000000
+
+/* Definitions for values the CTL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ 0x0001
+#define TMIO_SDIO_STAT_EXPUB52 0x4000
+#define TMIO_SDIO_STAT_EXWT 0x8000
+#define TMIO_SDIO_MASK_ALL 0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL 0x837f031d
+#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
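+/*
+ * In CTL_IRQ_MASK a set bit masks (disables) the corresponding
+ * interrupt, so "enable" clears bits and "disable" sets them.
+ */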
+#define enable_mmc_irqs(host, i) \
+ do { \
+ u32 mask;\
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+ mask &= ~((i) & TMIO_MASK_IRQ); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+ } while (0)
+
+#define disable_mmc_irqs(host, i) \
+ do { \
+ u32 mask;\
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+ mask |= ((i) & TMIO_MASK_IRQ); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+ } while (0)
+
+#define ack_mmc_irqs(host, i) \
+ do { \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+ } while (0)
+
+/* This is arbitrary; no one has needed any higher alignment yet */
+#define MAX_ALIGN 4
+
+struct tmio_mmc_host {
+ void __iomem *ctl;
+ unsigned long bus_shift;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ int irq;
+ unsigned int sdio_irq_enabled;
+
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
+ /* pio related stuff */
+ struct scatterlist *sg_ptr;
+ struct scatterlist *sg_orig;
+ unsigned int sg_len;
+ unsigned int sg_off;
+
+ struct platform_device *pdev;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+ unsigned int dma_sglen;
+ u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+ struct scatterlist bounce_sg;
+#endif
+
+ /* Track lost interrupts */
+ struct delayed_work delayed_reset_work;
+ spinlock_t lock;
+ unsigned long last_req_ts;
+};
+
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
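+/*
+ * The register block sits behind a 16-bit interface: bus_shift scales
+ * register offsets to the bus stride, and 32-bit registers are accessed
+ * as two consecutive 16-bit halves.
+ */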
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
-#include "tmio_mmc.h"
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+ host->sg_len = data->sg_len;
+ host->sg_ptr = data->sg;
+ host->sg_orig = data->sg;
+ host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+ host->sg_ptr = sg_next(host->sg_ptr);
+ host->sg_off = 0;
+ return --host->sg_len;
+}
+
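+/*
+ * PIO may touch highmem pages from interrupt context, so map them with
+ * kmap_atomic and keep local interrupts disabled while mapped.
+ */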
+static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+{
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a) \
+ do { \
+ if (status & TMIO_STAT_##a) \
+ printk(#a); \
+ } while (0)
+
+void pr_debug_status(u32 status)
+{
+ printk(KERN_DEBUG "status: %08x = ", status);
+ STATUS_TO_TEXT(CARD_REMOVE);
+ STATUS_TO_TEXT(CARD_INSERT);
+ STATUS_TO_TEXT(SIGSTATE);
+ STATUS_TO_TEXT(WRPROTECT);
+ STATUS_TO_TEXT(CARD_REMOVE_A);
+ STATUS_TO_TEXT(CARD_INSERT_A);
+ STATUS_TO_TEXT(SIGSTATE_A);
+ STATUS_TO_TEXT(CMD_IDX_ERR);
+ STATUS_TO_TEXT(STOPBIT_ERR);
+ STATUS_TO_TEXT(ILL_FUNC);
+ STATUS_TO_TEXT(CMD_BUSY);
+ STATUS_TO_TEXT(CMDRESPEND);
+ STATUS_TO_TEXT(DATAEND);
+ STATUS_TO_TEXT(CRCFAIL);
+ STATUS_TO_TEXT(DATATIMEOUT);
+ STATUS_TO_TEXT(CMDTIMEOUT);
+ STATUS_TO_TEXT(RXOVERFLOW);
+ STATUS_TO_TEXT(TXUNDERRUN);
+ STATUS_TO_TEXT(RXRDY);
+ STATUS_TO_TEXT(TXRQ);
+ STATUS_TO_TEXT(ILL_ACCESS);
+ printk("\n");
+}
+
+#else
+#define pr_debug_status(s) do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (enable) {
+ host->sdio_irq_enabled = 1;
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+ (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+ } else {
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+ host->sdio_irq_enabled = 0;
+ }
+}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
+ /*
+ * Testing on sh-mobile showed that SDIO IRQs are unmasked when
+ * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
+ * device IRQ here and restore the SDIO IRQ mask before
+ * re-enabling the device IRQ.
+ */
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+ enable_irq(host->irq);
+ }
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
+ /* see comment in tmio_mmc_clk_stop above */
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+ enable_irq(host->irq);
+ }
}
static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
msleep(10);
}
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ delayed_reset_work.work);
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->mrq;
+
+ /* request already finished */
+ if (!mrq ||
+ time_is_after_jiffies(host->last_req_ts +
+ msecs_to_jiffies(2000))) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ dev_warn(&host->pdev->dev,
+ "timeout waiting for hardware interrupt (CMD%u)\n",
+ mrq->cmd->opcode);
+
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+ else if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
+ host->cmd = NULL;
+ host->data = NULL;
+ host->mrq = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ reset(host);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
struct mmc_request *mrq = host->mrq;
+ if (!mrq)
+ return;
+
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
+ cancel_delayed_work(&host->delayed_reset_work);
+
mmc_request_done(host->mmc, mrq);
}
@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
+/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
if (data->flags & MMC_DATA_READ) {
if (!host->chan_rx)
disable_mmc_irqs(host, TMIO_MASK_READOP);
+ else
+ tmio_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq);
} else {
@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
- struct mmc_data *data = host->data;
+ struct mmc_data *data;
+ spin_lock(&host->lock);
+ data = host->data;
if (!data)
- return;
+ goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
/*
@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
} else {
tmio_mmc_do_data_irq(host);
}
+out:
+ spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
struct mmc_command *cmd = host->cmd;
int i, addr;
+ spin_lock(&host->lock);
+
if (!host->cmd) {
pr_debug("Spurious CMD irq\n");
- return;
+ goto out;
}
host->cmd = NULL;
@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
if (!host->chan_rx)
enable_mmc_irqs(host, TMIO_MASK_READOP);
} else {
- struct dma_chan *chan = host->chan_tx;
- if (!chan)
+ if (!host->chan_tx)
enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else
tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
tmio_mmc_finish_request(host);
}
+out:
+ spin_unlock(&host->lock);
+
return;
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
unsigned int ireg, irq_mask, status;
+ unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
pr_debug("MMC IRQ begin\n");
@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+ sdio_ireg = 0;
+ if (!ireg && (pdata->flags & TMIO_MMC_SDIO_IRQ)) {
+ sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+ sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+ if (sdio_ireg && !host->sdio_irq_enabled) {
+ pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+ sdio_status, sdio_irq_mask, sdio_ireg);
+ tmio_mmc_enable_sdio_irq(host->mmc, 0);
+ goto out;
+ }
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+ sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ if (sdio_ireg)
+ goto out;
+ }
+
pr_debug_status(status);
pr_debug_status(ireg);
@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
*/
/* Command completion */
- if (ireg & TMIO_MASK_CMD) {
- ack_mmc_irqs(host, TMIO_MASK_CMD);
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status);
}
@@ -407,6 +761,16 @@ out:
}
#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+ if (host->sg_ptr == &host->bounce_sg) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ }
+}
+
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg)
enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
- struct scatterlist *sg = host->sg_ptr;
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- int ret;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ align >= MAX_ALIGN)) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ /* Only a single sg element may be unaligned; use the bounce buffer for it */
+ if (!aligned) {
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0) {
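
The DMA fast path above hinges on one mask test: with align = (1 << alignment_shift) - 1, an sg offset or length qualifies for DMA only when its low bits under that mask are clear; anything else is bounced or punted to PIO. A minimal stand-alone sketch of that test, assuming a hypothetical alignment_shift of 1 (16-bit alignment), not the driver's actual platform data:

#include <stdbool.h>
#include <stdio.h>

/* Model of the alignment check in tmio_mmc_start_dma_rx()/_tx():
 * a value qualifies for DMA only if its low 'alignment_shift'
 * bits are all zero. */
static bool dma_capable(unsigned int value, unsigned int alignment_shift)
{
	unsigned int align = (1u << alignment_shift) - 1; /* low-bit mask */

	return (value & align) == 0;
}

int main(void)
{
	printf("offset 0x100 -> %d\n", dma_capable(0x100, 1)); /* 1: aligned */
	printf("offset 0x101 -> %d\n", dma_capable(0x101, 1)); /* 0: bounce/PIO */
	return 0;
}
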
@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
if (desc) {
- host->desc = desc;
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- host->cookie = desc->tx_submit(desc);
- if (host->cookie < 0) {
- host->desc = NULL;
- ret = host->cookie;
+ cookie = desc->tx_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
} else {
chan->device->device_issue_pending(chan);
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, host->cookie, host->mrq);
+ __func__, host->sg_len, ret, cookie, host->mrq);
- if (!host->desc) {
+pio:
+ if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
- reset(host);
- /* Fail this request, let above layers recover */
- host->mrq->cmd->error = ret;
- tmio_mmc_finish_request(host);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
- desc, host->cookie, host->sg_len);
-
- return ret > 0 ? 0 : ret;
+ desc, cookie, host->sg_len);
}
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
- struct scatterlist *sg = host->sg_ptr;
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- int ret;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ align >= MAX_ALIGN)) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ /* Only a single sg element may be unaligned; use the bounce buffer for it */
+ if (!aligned) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0) {
@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
if (desc) {
- host->desc = desc;
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- host->cookie = desc->tx_submit(desc);
- if (host->cookie < 0) {
- host->desc = NULL;
- ret = host->cookie;
+ cookie = desc->tx_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, host->cookie, host->mrq);
+ __func__, host->sg_len, ret, cookie, host->mrq);
- if (!host->desc) {
+pio:
+ if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
- reset(host);
- /* Fail this request, let above layers recover */
- host->mrq->cmd->error = ret;
- tmio_mmc_finish_request(host);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
- desc, host->cookie);
-
- return ret > 0 ? 0 : ret;
+ desc, cookie);
}
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
- return tmio_mmc_start_dma_rx(host);
+ tmio_mmc_start_dma_rx(host);
} else {
if (host->chan_tx)
- return tmio_mmc_start_dma_tx(host);
+ tmio_mmc_start_dma_tx(host);
}
-
- return 0;
}
static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
static void tmio_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data)
+ goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg)
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
}
/* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
- host->cookie = -EINVAL;
- host->desc = NULL;
-
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
host->chan_rx = NULL;
dma_release_channel(chan);
}
-
- host->cookie = -EINVAL;
- host->desc = NULL;
}
#else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- return 0;
}
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
- return tmio_mmc_start_dma(host, data);
+ tmio_mmc_start_dma(host, data);
+
+ return 0;
}
/* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (host->mrq)
pr_debug("request not null\n");
+ host->last_req_ts = jiffies;
+ wmb();
host->mrq = mrq;
if (mrq->data) {
@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
ret = tmio_mmc_start_command(host, mrq->cmd);
- if (!ret)
+ if (!ret) {
+ schedule_delayed_work(&host->delayed_reset_work,
+ msecs_to_jiffies(2000));
return;
+ }
fail:
+ host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
}
@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = tmio_mmc_get_cd,
+ .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
#ifdef CONFIG_PM
@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto host_free;
mmc->ops = &tmio_mmc_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA;
- mmc->caps |= pdata->capabilities;
+ mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_segs;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
if (pdata->ocr_mask)
mmc->ocr_avail = pdata->ocr_mask;
else
@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto cell_disable;
disable_mmc_irqs(host, TMIO_MASK_ALL);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ tmio_mmc_enable_sdio_irq(mmc, 0);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
goto cell_disable;
+ spin_lock_init(&host->lock);
+
+ /* Init delayed work for request timeouts */
+ INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
/* See if we also get DMA */
tmio_mmc_request_dma(host, pdata);
@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
+ cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
free_irq(host->irq, host);
if (cell->disable)
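
For orientation: the watchdog added by this patch arms a 2-second delayed work in tmio_mmc_request(), cancels it in tmio_mmc_finish_request(), and only fails the request once the deadline has really passed. A minimal user-space sketch of the jiffies-style deadline test (TIMEOUT_TICKS stands in for msecs_to_jiffies(2000); the tick values are hypothetical):

#include <stdio.h>

#define TIMEOUT_TICKS 2000

static int deadline_passed(unsigned long now, unsigned long last_req_ts)
{
	/* Signed subtraction keeps the comparison valid across counter
	 * wrap, which is what time_is_after_jiffies() relies on. */
	return (long)(now - last_req_ts) > TIMEOUT_TICKS;
}

int main(void)
{
	unsigned long last = 0xfffffff0UL;

	printf("%d\n", deadline_passed(last + 100, last));  /* 0: keep waiting */
	printf("%d\n", deadline_passed(last + 5000, last)); /* 1: timed out */
	return 0;
}
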
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78e3ea..00000000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Definitions for use with tmio_mmc.c
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- * (c) 2007 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND 0x00000001
-#define TMIO_STAT_DATAEND 0x00000004
-#define TMIO_STAT_CARD_REMOVE 0x00000008
-#define TMIO_STAT_CARD_INSERT 0x00000010
-#define TMIO_STAT_SIGSTATE 0x00000020
-#define TMIO_STAT_WRPROTECT 0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A 0x00000400
-#define TMIO_STAT_CMD_IDX_ERR 0x00010000
-#define TMIO_STAT_CRCFAIL 0x00020000
-#define TMIO_STAT_STOPBIT_ERR 0x00040000
-#define TMIO_STAT_DATATIMEOUT 0x00080000
-#define TMIO_STAT_RXOVERFLOW 0x00100000
-#define TMIO_STAT_TXUNDERRUN 0x00200000
-#define TMIO_STAT_CMDTIMEOUT 0x00400000
-#define TMIO_STAT_RXRDY 0x01000000
-#define TMIO_STAT_TXRQ 0x02000000
-#define TMIO_STAT_ILL_FUNC 0x20000000
-#define TMIO_STAT_CMD_BUSY 0x40000000
-#define TMIO_STAT_ILL_ACCESS 0x80000000
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL 0x837f031d
-#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-
-#define enable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define disable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask |= ((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define ack_mmc_irqs(host, i) \
- do { \
- sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
- } while (0)
-
-
-struct tmio_mmc_host {
- void __iomem *ctl;
- unsigned long bus_shift;
- struct mmc_command *cmd;
- struct mmc_request *mrq;
- struct mmc_data *data;
- struct mmc_host *mmc;
- int irq;
-
- /* Callbacks for clock / power control */
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-
- /* pio related stuff */
- struct scatterlist *sg_ptr;
- unsigned int sg_len;
- unsigned int sg_off;
-
- struct platform_device *pdev;
-
- /* DMA support */
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct tasklet_struct dma_complete;
- struct tasklet_struct dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
- struct dma_async_tx_descriptor *desc;
- unsigned int dma_sglen;
- dma_cookie_t cookie;
-#endif
-};
-
-#include <linux/io.h>
-
-static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
- u16 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
- u32 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-#include <linux/scatterlist.h>
-#include <linux/blkdev.h>
-
-static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
- host->sg_len = data->sg_len;
- host->sg_ptr = data->sg;
- host->sg_off = 0;
-}
-
-static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
- host->sg_ptr = sg_next(host->sg_ptr);
- host->sg_off = 0;
- return --host->sg_len;
-}
-
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
- unsigned long *flags)
-{
- local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(void *virt,
- unsigned long *flags)
-{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
- local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a) \
- do { \
- if (status & TMIO_STAT_##a) \
- printk(#a); \
- } while (0)
-
-void pr_debug_status(u32 status)
-{
- printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE);
- STATUS_TO_TEXT(CARD_INSERT);
- STATUS_TO_TEXT(SIGSTATE);
- STATUS_TO_TEXT(WRPROTECT);
- STATUS_TO_TEXT(CARD_REMOVE_A);
- STATUS_TO_TEXT(CARD_INSERT_A);
- STATUS_TO_TEXT(SIGSTATE_A);
- STATUS_TO_TEXT(CMD_IDX_ERR);
- STATUS_TO_TEXT(STOPBIT_ERR);
- STATUS_TO_TEXT(ILL_FUNC);
- STATUS_TO_TEXT(CMD_BUSY);
- STATUS_TO_TEXT(CMDRESPEND);
- STATUS_TO_TEXT(DATAEND);
- STATUS_TO_TEXT(CRCFAIL);
- STATUS_TO_TEXT(DATATIMEOUT);
- STATUS_TO_TEXT(CMDTIMEOUT);
- STATUS_TO_TEXT(RXOVERFLOW);
- STATUS_TO_TEXT(TXUNDERRUN);
- STATUS_TO_TEXT(RXRDY);
- STATUS_TO_TEXT(TXRQ);
- STATUS_TO_TEXT(ILL_ACCESS);
- printk("\n");
-}
-
-#else
-#define pr_debug_status(s) do { } while (0)
-#endif
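
A note on the header removed above: its enable/disable helpers program an inverted mask register, so a 0 bit means the interrupt is delivered. A self-contained sketch of that read-modify-write, reusing the TMIO_MASK_ALL and TMIO_MASK_READOP values from the deleted header (plain variables stand in for the MMIO accessors):

#include <stdio.h>

#define TMIO_MASK_ALL		0x837f031dU	/* reset state: all masked */
#define TMIO_MASK_READOP	0x01000004U	/* RXRDY | DATAEND */

static unsigned int irq_mask = TMIO_MASK_ALL;

static void enable_irqs(unsigned int bits)
{
	irq_mask &= ~bits;	/* clearing a bit unmasks the interrupt */
}

static void disable_irqs(unsigned int bits)
{
	irq_mask |= bits;
}

int main(void)
{
	enable_irqs(TMIO_MASK_READOP);
	printf("after enable:  %08x\n", irq_mask);	/* 827f0319 */
	disable_irqs(TMIO_MASK_READOP);
	printf("after disable: %08x\n", irq_mask);	/* 837f031d */
	return 0;
}
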
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a28d2..ff652c77a0a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
-config FEC2
- bool "Second FEC ethernet controller (on some ColdFire CPUs)"
- depends on FEC
- help
- Say Y here if you want to use the second built-in 10/100 Fast
- ethernet controller on some Motorola ColdFire processors.
-
config FEC_MPC52xx
tristate "MPC52xx FEC driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f..0b9fc5173ae 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_VERSION "1.1"
+#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -41,12 +46,7 @@
#include "bfin_mac.h"
-#define DRV_NAME "bfin_mac"
-#define DRV_VERSION "1.1"
-#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
/* allocate a new skb for next time receive */
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": init: low on mem - packet dropped\n");
+ pr_notice("init: low on mem - packet dropped\n");
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
init_error:
desc_list_free();
- printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+ pr_err("kmalloc failed\n");
return -ENOMEM;
}
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
udelay(1);
if (timeout_cnt-- < 0) {
- printk(KERN_ERR DRV_NAME
- ": wait MDC/MDIO transaction to complete timeout\n");
+ pr_err("wait MDC/MDIO transaction to complete timeout\n");
return -ETIMEDOUT;
}
}
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
opmode &= ~RMII_10;
break;
default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100!\n",
- DRV_NAME, phydev->speed);
+ netdev_warn(dev,
+ "Ack! Speed (%d) is not 10/100!\n",
+ phydev->speed);
break;
}
bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
/* now we are supposed to have a proper phydev, to attach to... */
if (!phydev) {
- printk(KERN_INFO "%s: Don't found any phy device at all\n",
- dev->name);
+ netdev_err(dev, "no phy device found\n");
return -ENODEV;
}
if (phy_mode != PHY_INTERFACE_MODE_RMII &&
phy_mode != PHY_INTERFACE_MODE_MII) {
- printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+ netdev_err(dev, "invalid phy interface mode\n");
return -EINVAL;
}
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
0, phy_mode);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "could not attach PHY\n");
return PTR_ERR(phydev);
}
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
lp->old_duplex = -1;
lp->phydev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
- "@sclk=%dMHz)\n",
- DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
- MDC_CLK, mdc_div, sclk/1000000);
+ pr_info("attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
};
/**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+ /* Set the VLAN registers to let 1522-byte packets pass through */
+ bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+ bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
/* Initialize the TX DMA channel registers */
bfin_write_DMA2_X_COUNT(0);
bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
udelay(1);
if (timeout_cnt == 0)
- printk(KERN_ERR DRV_NAME
- ": fails to timestamp the TX packet\n");
+ netdev_err(netdev, "timestamp the TX packet failed\n");
else {
struct skb_shared_hwtstamps shhwtstamps;
u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
* in which case we simply drop the packet
*/
if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: receive error - packet dropped\n");
+ netdev_notice(dev, "rx: receive error - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: low on mem - packet dropped\n");
+ netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
int ret;
u32 opmode;
- pr_debug("%s: %s\n", DRV_NAME, __func__);
+ pr_debug("%s\n", __func__);
/* Set RX DMA */
bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
u32 sysctl;
if (dev->flags & IFF_PROMISC) {
- printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+ netdev_info(dev, "set promisc mode\n");
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev)
* address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
*/
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+ netdev_warn(dev, "no valid ethernet hw addr\n");
return -EINVAL;
}
@@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_mii_probe;
}
+ lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+ lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(ndev);
@@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
bfin_mac_hwtstamp_init(ndev);
/* now, print out the card info, in a short format.. */
- dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+ netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
return 0;
@@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
* so set the GPIO pins to Ethernet mode
*/
pin_req = mii_bus_pd->mac_peripherals;
- rc = peripheral_request_list(pin_req, DRV_NAME);
+ rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
if (rc) {
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
return rc;
@@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = {
.resume = bfin_mac_resume,
.suspend = bfin_mac_suspend,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
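
The logging cleanup above leans on the kernel's pr_fmt() hook: define it before the printk helpers are pulled in and every pr_*() message gains the module-name prefix, which is why the repeated DRV_NAME strings could go. A minimal user-space model of the mechanism (printf stands in for printk; this is a sketch, not the kernel macros):

#include <stdio.h>

#define KBUILD_MODNAME "bfin_mac"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* ##__VA_ARGS__ is the GNU/kernel-style paste for empty argument lists */
#define pr_notice(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_notice("init: low on mem - packet dropped\n");
	/* prints: bfin_mac: init: low on mem - packet dropped */
	pr_notice("rx: %d packets dropped\n", 3);
	return 0;
}
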
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed236..f8559ac9a40 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
#include <linux/etherdevice.h>
#include <linux/bfin_mac.h>
+/*
+ * Disable hardware checksumming for bug #5600 if the writeback cache is
+ * enabled. Otherwise, corrupted RX packets would be sent up the stack
+ * without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
#define BFIN_MAC_CSUM_OFFLOAD
+#endif
#define TX_RECLAIM_JIFFIES (HZ / 5)
@@ -68,7 +75,6 @@ struct bfin_mac_local {
*/
struct net_device_stats stats;
- unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
struct timer_list tx_reclaim_timer;
struct net_device *ndev;
+ /* Data for EMAC_VLAN1 regs */
+ u16 vlan1_mask, vlan2_mask;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d6d86..6a858a29db5 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -636,6 +636,7 @@ struct bnx2x_common {
#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
int flash_size;
#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e..fb3ff7c4d7c 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
*/
@@ -17,53 +23,53 @@
#define BNX2X_DUMP_H
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define TSTORM_CAM_MODE 0x1b1440
+/* definitions */
+#define XSTORM_WAITP_ADDR 0x2b8a80
+#define TSTORM_WAITP_ADDR 0x1b8a80
+#define USTORM_WAITP_ADDR 0x338a80
+#define CSTORM_WAITP_ADDR 0x238a80
+#define TSTORM_CAM_MODE 0x1B1440
-#define RI_E1 0x1
-#define RI_E1H 0x2
+#define MAX_TIMER_PENDING 200
+#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1 0x1
+#define RI_E1H 0x2
#define RI_E2 0x4
-#define RI_ONLINE 0x100
+#define RI_ONLINE 0x100
#define RI_PATH0_DUMP 0x200
#define RI_PATH1_DUMP 0x400
-#define RI_E1_OFFLINE (RI_E1)
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE (RI_E1H)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE (RI_E2)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1_OFFLINE (RI_E1)
+#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE (RI_E1H)
+#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE (RI_E2)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+struct dump_sign {
+ u32 time_stamp;
+ u32 diag_ver;
+ u32 grc_dump_ver;
+};
struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+ u32 hdr_size; /* in dwords, excluding this field */
+ struct dump_sign dump_sign;
+ u32 xstorm_waitp;
+ u32 tstorm_waitp;
+ u32 ustorm_waitp;
+ u32 cstorm_waitp;
+ u16 info;
+ u8 idle_chk;
+ u8 reserved;
};
struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
u16 info;
};
-
-#define REGS_COUNT 558
+#define REGS_COUNT 834
static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
- { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
- { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
- { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
- { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
- { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
- { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
- { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
- { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
- { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
- { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
- { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
- { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
- { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
- { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
- { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
- { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
- { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
- { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
- { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
- { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
- { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
- { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+ { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+ { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+ { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+ { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+ { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+ { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+ { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+ { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+ { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+ { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+ { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+ { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+ { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+ { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+ { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+ { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+ { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+ { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+ { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+ { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+ { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+ { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+ { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+ { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+ { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+ { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+ { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+ { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+ { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+ { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+ { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+ { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+ { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+ { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+ { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+ { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+ { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+ { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+ { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+ { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+ { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+ { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+ { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+ { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+ { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+ { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+ { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+ { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+ { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+ { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+ { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+ { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+ { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+ { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+ { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+ { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+ { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+ { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+ { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+ { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
- { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
- { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+ { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+ { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+ { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+ { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+ { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+ { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+ { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+ { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+ { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+ { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+ { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+ { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+ { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
- { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+ { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+ { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+ { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+ { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+ { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+ { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
- { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
- { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
- { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
- { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
- { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+ { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+ { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+ { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+ { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+ { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+ { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
- { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
- { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+ { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+ { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+ { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+ { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
- { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+ { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+ { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+ { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+ { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+ { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+ { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+ { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+ { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+ { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+ { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
- { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+ { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+ { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+ { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
- { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+ { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+ { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+ { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+ { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
- { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
- { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
- { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
- { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
- { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
- { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
- { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
- { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
- { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
- { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
- { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
- { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
- { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
- { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
- { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
- { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
- { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
- { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
- { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
- { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
- { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
- { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
- { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
- { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
- { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
- { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
- { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
- { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
- { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
- { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
- { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
- { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
- { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
- { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
- { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
- { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
- { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
- { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
- { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
- { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
- { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
- { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
- { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
- { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
- { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
- { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
- { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+ { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+ { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+ { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+ { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+ { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+ { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+ { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+ { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+ { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+ { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+ { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+ { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+ { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+ { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+ { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+ { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+ { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+ { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+ { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+ { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+ { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+ { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+ { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+ { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+ { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+ { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+ { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+ { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+ { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+ { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+ { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+ { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+ { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+ { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+ { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+ { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+ { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+ { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+ { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+ { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+ { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+ { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+ { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+ { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+ { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+ { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+ { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+ { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+ { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+ { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
- { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+ { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
- { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
- { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
- { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
- { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
- { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+ { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+ { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+ { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+ { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+ { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+ { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+ { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+ { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+ { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+ { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+ { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+ { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+ { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+ { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+ { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+ { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+ { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+ { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+ { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+ { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+ { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+ { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+ { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+ { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+ { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+ { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
- { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
- { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
- { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
- { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+ { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+ { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+ { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+ { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+ { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+ { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+ { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+ { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+ { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+ { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+ { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+ { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+ { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+ { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+ { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+ { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+ { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
- { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
- { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
- { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
- { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
- { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
- { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
- { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
- { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
- { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
- { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
- { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
- { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
- { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
- { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
- { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+ { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+ { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+ { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+ { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+ { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+ { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+ { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+ { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+ { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+ { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+ { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+ { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+ { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+ { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+ { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+ { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+ { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+ { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+ { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+ { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+ { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+ { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+ { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+ { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+ { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+ { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+ { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+ { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+ { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+ { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+ { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+ { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+ { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+ { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+ { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+ { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+ { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+ { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+ { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+ { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+ { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+ { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+ { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+ { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+ { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+ { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+ { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+ { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+ { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+ { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+ { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+ { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+ { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+ { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+ { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+ { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+ { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+ { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+ { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+ { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+ { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+ { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+ { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+ { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+ { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+ { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+ { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+ { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+ { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+ { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+ { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+ { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+ { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+ { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+ { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+ { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+ { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+ { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+ { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+ { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+ { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+ { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+ { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+ { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+ { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+ { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+ { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+ { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+ { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+ { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+ { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+ { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+ { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+ { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+ { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+ { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+ { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+ { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+ { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+ { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+ { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+ { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+ { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+ { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+ { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+ { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+ { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+ { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+ { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+ { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+ { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+ { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+ { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+ { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+ { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+ { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+ { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+ { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+ { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+ { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+ { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+ { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+ { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+ { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+ { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+ { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+ { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+ { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
- { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
- { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
- { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
- { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+ { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+ { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+ { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+ { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+ { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+ { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+ { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+ { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+ { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+ { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+ { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+ { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+ { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+ { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+ { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+ { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+ { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+ { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+ { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+ { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+ { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+ { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+ { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+ { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+ { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+ { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+ { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+ { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+ { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+ { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+ { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+ { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+ { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+ { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+ { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+ { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+ { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+ { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+ { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+ { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+ { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+ { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
};
-
-#define IDLE_REGS_COUNT 277
+#define IDLE_REGS_COUNT 237
static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
- { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
- { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
- { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+ { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+ { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+ { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+ { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+ { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+ { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+ { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+ { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
- { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
- { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
- { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
- { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
- { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
- { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
- { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
- { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
- { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
- { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
- { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
- { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
- { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
- { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
- { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
- { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
- { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
- { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
- { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
- { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
- { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
- { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
- { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
- { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
- { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
- { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
- { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
- { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
- { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
- { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
- { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
- { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
- { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+ { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+ { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+ { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+ { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+ { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+ { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+ { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+ { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+ { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+ { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+ { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+ { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+ { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+ { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+ { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+ { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+ { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+ { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+ { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+ { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+ { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+ { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+ { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+ { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+ { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+ { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+ { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+ { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+ { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+ { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+ { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+ { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+ { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+ { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+ { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+ { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+ { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
- { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
- { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
- { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
- { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
- { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
- { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
- { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+ { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+ { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+ { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+ { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+ { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+ { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+ { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
- { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
- { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
- { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
- { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
- { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
- { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
- { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
- { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
- { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
- { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
- { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
- { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
- { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+ { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+ { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+ { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+ { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+ { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+ { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+ { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+ { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+ { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+ { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+ { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+ { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+ { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+ { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+ { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+ { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+ { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+ { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
- { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
- { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
- { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
- { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
- { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
- { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
- { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
- { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
- { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
- { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
- { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
- { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
- { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
- { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
- { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
- { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+ { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+ { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+ { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+ { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+ { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+ { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+ { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+ { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+ { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+ { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+ { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+ { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+ { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+ { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
- { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
- { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
{ 0x3380c0, 1, RI_ALL_ONLINE }
};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
};
-
#define WREGS_COUNT_E1H 1
static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
};
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
#define TIMER_REGS_COUNT_E1 2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x1640d0, 0x1640d4 };
#define TIMER_REGS_COUNT_E1H 2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2 2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x1640d0, 0x1640d4 };
+
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+ { 0x0, 0, RI_E1H_ONLINE } };
#define PAGE_MODE_VALUES_E2 2
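A minimal sketch (not part of the patch) of how the reg_addrs[] table above is consumed: each entry gives a base GRC address, a count of consecutive 32-bit registers, and an RI_* info mask saying on which chips (E1/57710, E1H/57711, E2/57712) the range may be read while the device is online. Assuming the IS_E2_ONLINE() predicate used by the ethtool code below, the online dump length in words is just the sum of the online entries' sizes:

	/* Sketch under the assumptions above; mirrors the loops in
	 * bnx2x_get_regs_len() further down. */
	static int bnx2x_e2_online_words(void)
	{
		int i, words = 0;

		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E2_ONLINE(reg_addrs[i].info))
				words += reg_addrs[i].size;	/* 32-bit words */
		return words;
	}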
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d894c..5b44a8b4850 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
/* Note: in the format strings below %s is replaced by the queue-name which is
* either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
- int i;
+ int i, j, k;
if (CHIP_IS_E1(bp)) {
for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
regdump_len += wreg_addrs_e2[i].size *
(1 + wreg_addrs_e2[i].read_regs_count);
+
+ for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+ for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+ for (k = 0; k < PAGE_READ_REGS_E2; k++)
+				if (IS_E2_ONLINE(page_read_regs_e2[k].info))
+ regdump_len +=
+ page_read_regs_e2[k].size;
+ }
}
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
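Spelled out, the loop added above extends the E2 dump length by PAGE_MODE_VALUES_E2 * PAGE_WRITE_REGS_E2 * (sum of the online page_read_regs_e2[] sizes): every paged register range is read once per page value per page-write register. The running total is kept in 32-bit words; the regdump_len *= 4 just above converts it to bytes before the dump header size is added.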
@@ -539,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev,
if (!netif_running(bp->dev))
return;
+	/* Disable parity attentions for the duration of the dump, since
+	 * reading never-written registers can raise false alarms. We
+	 * will re-enable parity attentions right after the dump.
+	 */
+ bnx2x_disable_blocks_parity(bp);
+
dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
dump_hdr.dump_sign = dump_sign_all;
dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev,
bnx2x_read_pages_regs_e2(bp, p);
}
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
}
#define PHY_FW_VER_LEN 20
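The net effect of the two bnx2x_get_regs() hunks above is a bracketed dump. A condensed sketch of the sequence, using the helpers as named in the patch:

	bnx2x_disable_blocks_parity(bp);	/* mask parity attentions */
	/* ... write dump header and read all online registers ... */
	bnx2x_clear_blocks_parity(bp);		/* drop any falsely latched bits */
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);	/* restore attention masks */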
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a55..5a268e9a089 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
u64 next;
};
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+ u32 mask_addr;
+ u32 sts_clr_addr;
+ u32 en_mask; /* Mask to enable parity attentions */
+ struct {
+ u32 e1; /* 57710 */
+ u32 e1h; /* 57711 */
+ u32 e2; /* 57712 */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[7]; /* Block's longest name is 6 characters long
+ * (name + suffix)
+ */
+} bnx2x_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors, as we don't yet
+	 * want to handle the "system kill" flow.
+	 */
+ BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
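Since the BLOCK_PRTY_INFO() macros build the register names by token pasting, one entry written out by hand may make the table easier to read; BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0) expands to:

	{
		HC_REG_HC_PRTY_MASK,		/* mask_addr */
		HC_REG_HC_PRTY_STS_CLR,		/* sts_clr_addr */
		0x7,				/* en_mask */
		{0x7, 0x7, 0},			/* valid bits on E1, E1H, E2 */
		"HC"				/* name */
	},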
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* The registers below control the MCP parity attention output. When
+ * the MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are enabled;
+ * when cleared, they are disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+ int i;
+ u32 reg_val;
+
+ for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+ if (CHIP_IS_E1(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+ else if (CHIP_IS_E1H(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+ else
+ return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (dis_mask) {
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ DP(NETIF_MSG_HW, "Setting parity mask "
+ "for %s to\t\t0x%x\n",
+ bnx2x_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+ u32 reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask) {
+			reg_val = REG_RD(bp,
+				bnx2x_blocks_parity_data[i].sts_clr_addr);
+ if (reg_val & reg_mask)
+ DP(NETIF_MSG_HW,
+ "Parity errors in %s: 0x%x\n",
+ bnx2x_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
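For reference, the 0x780 written above is exactly bits [10:7] set: (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10) = 0x80 + 0x100 + 0x200 + 0x400 = 0x780, matching the four latched-parity clear bits listed in the comment.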
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask)
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, true);
+}
+
+
#endif /* BNX2X_INIT_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512a04..84e1af4d65e 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -3152,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
* should be run under rtnl lock
@@ -3527,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
- if (bnx2x_chk_parity_attn(bp)) {
+ if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
bp->recovery_state = BNX2X_RECOVERY_INIT;
bnx2x_set_reset_in_progress(bp);
schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
return 0; /* OK */
}
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
- REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
-static const struct {
- u32 addr;
- u32 mask;
-} bnx2x_parity_mask[] = {
- {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
- {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
- {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
- {HC_REG_HC_PRTY_MASK, 0x7},
- {MISC_REG_MISC_PRTY_MASK, 0x1},
- {QM_REG_QM_PRTY_MASK, 0x0},
- {DORQ_REG_DORQ_PRTY_MASK, 0x0},
- {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
- {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
- {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
- {CDU_REG_CDU_PRTY_MASK, 0x0},
- {CFC_REG_CFC_PRTY_MASK, 0x0},
- {DBG_REG_DBG_PRTY_MASK, 0x0},
- {DMAE_REG_DMAE_PRTY_MASK, 0x0},
- {BRB1_REG_BRB1_PRTY_MASK, 0x0},
- {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
- {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
- {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
- {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
- {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
- {USEM_REG_USEM_PRTY_MASK_0, 0x0},
- {USEM_REG_USEM_PRTY_MASK_1, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
- REG_WR(bp, bnx2x_parity_mask[i].addr,
- bnx2x_parity_mask[i].mask);
-}
-
-
static void bnx2x_reset_common(struct bnx2x *bp)
{
/* reset_common */
@@ -5350,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
/* clear PXP2 attentions */
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
- enable_blocks_attention(bp);
- if (CHIP_PARITY_SUPPORTED(bp))
- enable_blocks_parity(bp);
+ bnx2x_enable_blocks_attention(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, "
"must load devices in order!\n");
- /* Set multi queue mode */
- if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
- ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
- "requested is not MSI-X\n");
- multi_mode = ETH_RSS_MODE_DISABLED;
- }
bp->multi_mode = multi_mode;
bp->int_mode = int_mode;
@@ -9560,9 +9508,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->reset_task);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b7290..38ef7ca9f21 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
* WR - Write Clear (write 1 to clear the bit)
*
*/
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
#define BRB1_REG_BRB1_PRTY_MASK 0x60138
/* [R 4] Parity register #0 read */
#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
* address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
* BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
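The new *_PRTY_STS_CLR definitions in this file are the read-clear ([RC]) counterparts of the existing *_PRTY_STS registers: reading one returns the latched parity status and clears it in the same access, which is what bnx2x_clear_blocks_parity() relies on. A minimal sketch, reusing the BRB1 valid-bit mask (0xf) from bnx2x_blocks_parity_data[]:

	u32 sts = REG_RD(bp, BRB1_REG_BRB1_PRTY_STS_CLR);	/* read also clears */
	if (sts & 0xf)
		DP(NETIF_MSG_HW, "Parity errors in BRB1: 0x%x\n", sts & 0xf);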
@@ -132,8 +136,12 @@
#define CCM_REG_CCM_INT_MASK 0xd01e4
/* [R 11] Interrupt register #0 read */
#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
/* [R 27] Parity register #0 read */
#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
#define CDU_REG_CDU_PRTY_MASK 0x10104c
/* [R 5] Parity register #0 read */
#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
/* [RC 32] logging of error data in case of a CDU load error:
   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error;
   type_error; actual_active; actual_compressed_context}; */
@@ -381,6 +391,8 @@
#define CFC_REG_CFC_PRTY_MASK 0x104118
/* [R 4] Parity register #0 read */
#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
/* [RW 21] CID cam access (21:1 - Data; Valid - 0) */
#define CFC_REG_CID_CAM 0x104800
#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
/* [R 11] Parity register #0 read */
#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
#define CSDM_REG_ENABLE_IN1 0xc2238
#define CSDM_REG_ENABLE_IN2 0xc223c
#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
/* [R 32] Parity register #0 read */
#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
#define CSEM_REG_ENABLE_IN 0x2000a4
#define CSEM_REG_ENABLE_OUT 0x2000a8
/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
#define DBG_REG_DBG_PRTY_MASK 0xc0a8
/* [R 1] Parity register #0 read */
#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
* function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
* 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
#define DMAE_REG_DMAE_PRTY_MASK 0x102064
/* [R 4] Parity register #0 read */
#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
/* [RW 1] Command 0 go. */
#define DMAE_REG_GO_C0 0x102080
/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
#define DORQ_REG_DORQ_PRTY_MASK 0x170190
/* [R 2] Parity register #0 read */
#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
/* [RW 8] The address to write the DPM CID to STORM. */
#define DORQ_REG_DPM_CID_ADDR 0x170044
/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
/* [R 1] data available for error memory. If this bit is clear do not read
* from error_handling_memory. */
#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
/* [R 11] Parity register #0 read */
#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
/* [R 4] Debug: int_handle_fsm */
#define IGU_REG_INT_HANDLE_FSM 0x130050
#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
#define MISC_REG_MISC_PRTY_MASK 0xa398
/* [R 1] Parity register #0 read */
#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
#define MISC_REG_NIG_WOL_P0 0xa270
#define MISC_REG_NIG_WOL_P1 0xa274
/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -2082,6 +2111,10 @@
#define PBF_REG_PBF_INT_MASK 0x1401d4
/* [R 5] Interrupt register #0 read */
#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
#define PB_REG_CONTROL 0
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
@@ -2091,6 +2124,8 @@
#define PB_REG_PB_PRTY_MASK 0x38
/* [R 4] Parity register #0 read */
#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2446,6 +2481,8 @@
#define PRS_REG_PRS_PRTY_MASK 0x401a4
/* [R 8] Parity register #0 read */
#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
request message */
#define PRS_REG_PURE_REGIONS 0x40024
@@ -2599,6 +2636,9 @@
/* [R 32] Parity register #0 read */
#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
indication about backpressure) */
#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -3001,6 +3041,8 @@
#define PXP_REG_PXP_PRTY_MASK 0x103094
/* [R 26] Parity register #0 read */
#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
/* [RW 4] The activity counter initial increment value sent in the load
request */
#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3157,6 +3199,8 @@
#define QM_REG_QM_PRTY_MASK 0x168454
/* [R 12] Parity register #0 read */
#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
#define QM_REG_QSTATUS_HIGH 0x16802c
/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
#define QM_REG_WRRWEIGHTS_9 0x168848
/* [R 6] Keep the fill level of the fifo from write client 1 */
#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset the parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
#define SRC_REG_COUNTFREE0 0x40500
/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
ports. If set the searcher support 8 functions. */
@@ -3470,6 +3516,8 @@
#define SRC_REG_SRC_PRTY_MASK 0x404c8
/* [R 3] Parity register #0 read */
#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
#define TCM_REG_CAM_OCCUP 0x5017c
/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
#define TCM_REG_TCM_INT_MASK 0x501dc
/* [R 11] Interrupt register #0 read */
#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
/* [R 27] Parity register #0 read */
#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
#define TM_REG_TM_INT_MASK 0x1640fc
/* [R 1] Interrupt register #0 read */
#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
/* [RW 8] The event id for aggregated interrupt 0 */
#define TSDM_REG_AGG_INT_EVENT_0 0x42038
#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3835,6 +3891,8 @@
#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
/* [R 11] Parity register #0 read */
#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
/* [RW 8] List of free threads . There is a bit per thread. */
#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
/* [RW 3] The arbitration scheme of time_slot 0 */
#define TSEM_REG_TS_0_AS 0x180038
/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
#define UCM_REG_UCM_INT_STS 0xe01c8
/* [R 27] Parity register #0 read */
#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
#define USDM_REG_USDM_PRTY_MASK 0xc42c0
/* [R 11] Parity register #0 read */
#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
/* [RW 5] The number of time_slots in the arbitration cycle */
#define USEM_REG_ARB_CYCLE_SIZE 0x300034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
/* [R 32] Parity register #0 read */
#define USEM_REG_USEM_PRTY_STS_0 0x300124
#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
* VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4797,6 +4865,8 @@
#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
/* [R 11] Parity register #0 read */
#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
/* [R 32] Parity register #0 read */
#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -6316,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
}
+#endif /* BNX2X_REG_H */
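Across the register header above, each block gains a *_PRTY_STS_CLR define sitting 4 bytes after its *_PRTY_STS counterpart; the [RC] annotation marks them read-to-clear, meaning a single read both reports and clears the latched parity bits. A minimal software model of that semantic, with hypothetical names (this is not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Software model of an [RC] register: the read returns the latched
     * bits and clears the latch as a side effect, so callers must
     * capture the value exactly once. */
    static uint32_t latched_parity = 0x5;    /* pretend two bits latched */

    static uint32_t prty_sts_clr_read(void)
    {
        uint32_t v = latched_parity;
        latched_parity = 0;                  /* hardware clears on read */
        return v;
    }

    int main(void)
    {
        assert(prty_sts_clr_read() == 0x5);  /* first read reports the bits */
        assert(prty_sts_clr_read() == 0x0);  /* ...and has cleared them */
        return 0;
    }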
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b144cc..bda60d590fa 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
spin_lock_bh(&bp->stats_lock);
+ if (bp->stats_pending) {
+ spin_unlock_bh(&bp->stats_lock);
+ return;
+ }
+
ramrod_data.drv_counter = bp->stats_counter++;
ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
for_each_eth_queue(bp, i)
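The hunk above guards bnx2x_storm_stats_post() so a statistics ramrod is only posted when none is already in flight: the pending flag is tested under the same lock the completion path uses to clear it. A minimal userspace analogue of that pattern, using pthreads and illustrative names only:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool stats_pending;
    static unsigned int stats_counter;

    static void stats_post(void)
    {
        pthread_mutex_lock(&stats_lock);
        if (stats_pending) {            /* a request is already in flight */
            pthread_mutex_unlock(&stats_lock);
            return;
        }
        stats_pending = true;           /* claim the slot... */
        stats_counter++;                /* ...then build and post the request */
        pthread_mutex_unlock(&stats_lock);
    }

    /* The completion path would clear stats_pending under the same lock. */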
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f89575..56166ae2059 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
netif_set_real_num_tx_queues(dev, pi->nqsets);
err = netif_set_real_num_rx_queues(dev, pi->nqsets);
if (err)
- return err;
- set_bit(pi->port_id, &adapter->open_device_map);
+ goto err_unwind;
err = link_start(dev);
if (err)
- return err;
+ goto err_unwind;
+
netif_tx_start_all_queues(dev);
+ set_bit(pi->port_id, &adapter->open_device_map);
return 0;
+
+err_unwind:
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return err;
}
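The reworked cxgb4vf_open() follows the usual goto-unwind shape: every step that can fail branches to a single error label, and the externally visible state change (setting the bit in open_device_map) happens only after all steps succeed, so the unwind can tell whether this was the first open and tear the adapter back down. A schematic sketch of the shape, with placeholder names:

    #include <errno.h>

    struct dev;  /* opaque placeholder */

    static int step_a(struct dev *d) { (void)d; return 0; }
    static int step_b(struct dev *d) { (void)d; return 0; }
    static void teardown(struct dev *d) { (void)d; }
    static void mark_open(struct dev *d) { (void)d; }

    static int open_port(struct dev *d)
    {
        int err;

        err = step_a(d);
        if (err)
            goto err_unwind;
        err = step_b(d);
        if (err)
            goto err_unwind;

        mark_open(d);   /* publish success only after all steps pass */
        return 0;

    err_unwind:
        teardown(d);    /* undo shared setup on the failure path */
        return err;
    }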
/*
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
*/
static int cxgb4vf_stop(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
pi->link_cfg.link_ok = 0;
clear_bit(pi->port_id, &adapter->open_device_map);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78c8e3..0f51c80475c 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/*
* Write the command array into the Mailbox Data register array and
* transfer ownership of the mailbox to the firmware.
+ *
+ * For the VFs, the Mailbox Data "registers" are actually backed by
+ * T4's "MA" interface rather than PL Registers (as is the case for
+ * the PFs). Because these are in different coherency domains, the
+ * write to the VF's PL-register-backed Mailbox Control can race in
+ * front of the writes to the MA-backed VF Mailbox Data "registers".
+ * So we need to do a read-back on at least one byte of the VF Mailbox
+ * Data registers before doing the write to the VF Mailbox Control
+ * register.
*/
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+ t4_read_reg(adapter, mbox_data); /* flush write */
+
t4_write_reg(adapter, mbox_ctl,
MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
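The comment added above describes a write-ordering hazard between two coherency domains, solved by reading back from the data region before ringing the control register. A standalone sketch of the idiom, using plain volatile accesses as a stand-in for the kernel's writeq()/readl() MMIO accessors (all names are placeholders):

    #include <stdint.h>

    static void post_and_ring(volatile uint64_t *data, const uint64_t *cmd,
                              int qwords, volatile uint32_t *ctl, uint32_t go)
    {
        for (int i = 0; i < qwords; i++)
            data[i] = cmd[i];               /* stage the command words */
        (void)*(volatile uint32_t *)data;   /* read-back orders data vs. ctl */
        *ctl = go;                          /* hand ownership to firmware */
        (void)*ctl;                         /* flush the doorbell write */
    }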
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e697b7..aed223b1b89 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
if (hw->mac_type == e1000_82541 ||
hw->mac_type == e1000_82541_rev_2 ||
hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82547_rev_2) {
+ hw->mac_type == e1000_82547_rev_2)
hw->phy_type = e1000_phy_igp;
- break;
- }
+ break;
+ case RTL8211B_PHY_ID:
+ hw->phy_type = e1000_phy_8211;
+ break;
+ case RTL8201N_PHY_ID:
+ hw->phy_type = e1000_phy_8201;
+ break;
default:
/* Should never have loaded on this device */
hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82547GI:
hw->mac_type = e1000_82547_rev_2;
break;
+ case E1000_DEV_ID_INTEL_CE4100_GBE:
+ hw->mac_type = e1000_ce4100;
+ break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
case e1000_82542_rev2_1:
hw->media_type = e1000_media_type_fiber;
break;
+ case e1000_ce4100:
+ hw->media_type = e1000_media_type_copper;
+ break;
default:
status = er32(STATUS);
if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Reset is performed on a shadow of the control register */
ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
+ case e1000_ce4100:
default:
ew32(CTRL, (ctrl | E1000_CTRL_RST));
break;
@@ -952,6 +964,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
}
/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* SW reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_aux;
+
+ switch (hw->phy_type) {
+ case e1000_phy_8211:
+ ret_val = e1000_copper_link_rtl_setup(hw);
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ case e1000_phy_8201:
+ /* Set RMII mode */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= E1000_CTL_AUX_RMII;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+
+ /* Disable the J/K bits required for receive */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= 0x4;
+ ctrl_aux &= ~0x2;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+ ret_val = e1000_copper_link_rtl_setup(hw);
+
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ default:
+ e_dbg("Error Resetting the PHY\n");
+ return E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
* e1000_copper_link_preconfig - early configuration for copper
* @hw: Struct containing variables accessed by shared code
*
@@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /* IFE/RTL8201N PHY only supports 10/100 */
+ if (hw->phy_type == e1000_phy_8201)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
@@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
s32 ret_val;
e_dbg("e1000_copper_link_postconfig");
- if (hw->mac_type >= e1000_82544) {
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
ret_val = e1000_copper_link_mgp_setup(hw);
if (ret_val)
return ret_val;
+ } else {
+ ret_val = gbe_dhg_phy_setup(hw);
+ if (ret_val) {
+ e_dbg("gbe_dhg_phy_setup failed!\n");
+ return ret_val;
+ }
}
if (hw->autoneg) {
@@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val =
- e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
+ else if (hw->phy_type == e1000_phy_8201)
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
+ if (hw->phy_type == e1000_phy_8201) {
+ mii_1000t_ctrl_reg = 0;
+ } else {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
return E1000_SUCCESS;
}
@@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
- /* Set up duplex in the Device Control and Transmit Control
- * registers depending on negotiated values.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
+ switch (hw->phy_type) {
+ case e1000_phy_8201:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
- if (phy_data & M88E1000_PSSR_DPLX)
- ctrl |= E1000_CTRL_FD;
- else
- ctrl &= ~E1000_CTRL_FD;
+ if (phy_data & RTL_PHY_CTRL_FD)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
- e1000_config_collision_dist(hw);
+ if (phy_data & RTL_PHY_CTRL_SPD_100)
+ ctrl |= E1000_CTRL_SPD_100;
+ else
+ ctrl |= E1000_CTRL_SPD_10;
- /* Set up speed in the Device Control register depending on
- * negotiated values.
- */
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
- ctrl |= E1000_CTRL_SPD_1000;
- else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
- ctrl |= E1000_CTRL_SPD_100;
+ e1000_config_collision_dist(hw);
+ break;
+ default:
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) ==
+ M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
/* Write the configured values back to the Device Control Reg. */
ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* speed/duplex on the MAC to the current PHY speed/duplex
* settings.
*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_ce4100))
e1000_config_collision_dist(hw);
else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_read_phy_reg_ex");
@@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_READ) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 64; i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+
+ mdic = readl(E1000_MDIO_STS);
+ if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+ e_dbg("MDI Read Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
}
- *phy_data = (u16) mdic;
} else {
/* We must first send a preamble through the MDIO pin to signal the
* beginning of an MII instruction. This is done by sending 32
@@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_write_phy_reg_ex");
@@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
if (hw->mac_type > e1000_82543) {
- /* Set up Op-code, Phy Address, register address, and data intended
- * for the PHY register in the MDI Control register. The MAC will take
- * care of interfacing with the PHY to send the desired data.
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
*/
- mdic = (((u32) phy_data) |
- (reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 641; i++) {
- udelay(5);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
}
} else {
/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
if (hw->phy_id == M88E1011_I_PHY_ID)
match = true;
break;
+ case e1000_ce4100:
+ if ((hw->phy_id == RTL8211B_PHY_ID) ||
+ (hw->phy_id == RTL8201N_PHY_ID))
+ match = true;
+ break;
case e1000_82541:
case e1000_82541_rev_2:
case e1000_82547:
@@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
if (hw->phy_type == e1000_phy_igp)
return e1000_phy_igp_get_info(hw, phy_info);
+ else if ((hw->phy_type == e1000_phy_8211) ||
+ (hw->phy_type == e1000_phy_8201))
+ return E1000_SUCCESS;
else
return e1000_phy_m88_get_info(hw, phy_info);
}
@@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_read_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_write_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c6bcd..f5514a0d5be 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -52,6 +52,7 @@ typedef enum {
e1000_82545,
e1000_82545_rev_3,
e1000_82546,
+ e1000_ce4100,
e1000_82546_rev_3,
e1000_82541,
e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
} e1000_1000t_rx_status;
typedef enum {
- e1000_phy_m88 = 0,
- e1000_phy_igp,
- e1000_phy_undefined = 0xFF
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_8211,
+ e1000_phy_8201,
+ e1000_phy_undefined = 0xFF
} e1000_phy_type;
typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82547EI_MOBILE 0x101A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100-specific;
+ * the RMII/RGMII function is switched by this register - RW.
+ * The following are bit definitions of the Auxiliary Control Register.
+ */
+#define E1000_CTL_AUX 0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT 10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
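The encodings above compose into a single Auxiliary Control value. An illustrative composition (not taken from the patch) that selects RMII mode, applies the CTL_AUX endianness policy to both descriptors and packets, and picks LW little endian / byte big endian:

    u32 ctl_aux = E1000_CTL_AUX_RMII | E1000_CTL_AUX_ALL |
                  E1000_CTL_AUX_LWLE_BBE;
    ew32(CTL_AUX, ctl_aux);    /* ew32() as used elsewhere in this driver */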
#define E1000_RCTL 0x00100 /* RX Control - RW */
#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
* in more current versions of the 8254x. Despite the difference in location,
* the registers function in the same manner.
*/
+#define E1000_82542_CTL_AUX E1000_CTL_AUX
#define E1000_82542_CTRL E1000_CTRL
#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
#define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
+#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
+#define INTEL_CE_GBE_MDIC_GO 0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
+
#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2871,6 +2919,11 @@ struct e1000_host_command_info {
#define M88E1111_I_PHY_ID 0x01410CC0
#define L1LXT971A_PHY_ID 0x001378E0
+#define RTL8211B_PHY_ID 0x001CC910
+#define RTL8201N_PHY_ID 0x8200
+#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex. 0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
+
/* Bits...
* 15-5: page
* 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d2e4a..4ff88a683f6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
#include "e1000.h"
#include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
/* required last entry */
{0,}
};
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
case e1000_82541:
case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
pba = E1000_PBA_48K;
break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac;
u16 eeprom_data = 0;
+ u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_sw_init;
err = -EIO;
+ if (hw->mac_type == e1000_ce4100) {
+ ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+ ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ pci_resource_len(pdev, BAR_1));
+
+ if (!ce4100_gbe_mdio_base_virt)
+ goto err_mdio_ioremap;
+ }
if (hw->mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* Auto detect PHY address */
+ if (hw->mac_type == e1000_ce4100) {
+ for (i = 0; i < 32; i++) {
+ hw->phy_addr = i;
+ e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+ if (tmp == 0 || tmp == 0xFF) {
+ if (i == 31)
+ goto err_eeprom;
+ continue;
+ } else
+ break;
+ }
+ }
+
/* reset the hardware with the new settings */
e1000_reset(adapter);
@@ -1171,6 +1203,8 @@ err_eeprom:
kfree(adapter->rx_ring);
err_dma:
err_sw_init:
+err_mdio_ioremap:
+ iounmap(ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
/* First rev 82545 and 82546 need to not allow any memory
* write location to cross 64k boundary due to errata 23 */
if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
}
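The errata-23 test above works because begin and (end - 1) share all bits from 16 upward exactly when the buffer sits inside one 64 KiB region; any difference in those bits means a boundary crossing. A self-contained illustration with two worked cases:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool within_64k(uintptr_t begin, size_t len)
    {
        uintptr_t end = begin + len;
        /* Same expression as the driver's check, inverted for "fits". */
        return (((begin ^ (end - 1)) >> 16) == 0);
    }

    int main(void)
    {
        assert(within_64k(0x10000, 0x1000));   /* fits in one 64k region */
        assert(!within_64k(0x0FFF0, 0x20));    /* spans 0xFFFF/0x10000 */
        return 0;
    }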
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa89..55c1711f168 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE 0x60000
+#define GBE_CONFIG_OFFSET 0x0
+
+#define GBE_CONFIG_RAM_BASE \
+ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+ (iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+ (ioread16_rep(base + (offset << 1), data, count))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e4097ef1..cb6c7b1c1fb 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->type = e1000_phy_bm;
phy->ops.acquire = e1000_get_hw_semaphore_82574;
phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
break;
default:
return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
/* This can only be done after all function pointers are setup. */
ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ e_dbg("Error getting PHY ID\n");
+ return ret_val;
+ }
/* Verify phy id */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
if (phy->id != IGP01E1000_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82573:
if (phy->id != M88E1111_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82574:
case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
default:
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
}
- return 0;
+ if (ret_val)
+ e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
}
/**
@@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
}
/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true, else clear lplu for D3. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
* @hw: pointer to the HW structure
*
@@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext;
s32 ret_val;
/*
@@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8e911..5255be75374 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
#include "hw.h"
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644e13d..e45a61c8930 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
**/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
ret_val = e1000_check_alt_mac_addr_generic(hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf6f5a..f8ed03dab9b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name, 32);
- strncpy(drvinfo->version, e1000e_driver_version, 32);
+ strncpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version) - 1);
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- e1000e_reset(adapter);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000e_reset(adapter);
- /* make sure the phy is powered up */
- e1000e_power_up_phy(adapter);
if (e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
- clear_bit(__E1000_TESTING, &adapter->state);
- dev_open(netdev);
- set_bit(__E1000_TESTING, &adapter->state);
- }
+ /* Online tests */
e_info("online testing starting\n");
- /* Online tests */
- if (e1000_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* register, eeprom, intr and loopback tests not run online */
data[0] = 0;
data[1] = 0;
data[2] = 0;
data[3] = 0;
- if (!if_running && (adapter->flags & FLAG_HAS_AMT))
- dev_close(netdev);
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__E1000_TESTING, &adapter->state);
}
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
msleep_interruptible(4 * 1000);
}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c3..e774380c7ce 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -83,6 +83,7 @@ enum e1e_registers {
E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc083272..5328a292773 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
}
}
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
- u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
- u32 i, j, mask, crc;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- crc = crc ^ mac[i];
- for (j = 8; j > 0; j--) {
- mask = (crc & 1) * (-1);
- crc = (crc >> 1) ^ (poly & mask);
- }
- }
- return ~crc;
-}
-
/**
* e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
* with 82579 PHY
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
mac_addr[4] = (addr_high & 0xFF);
mac_addr[5] = ((addr_high >> 8) & 0xFF);
- ew32(PCH_RAICC(i),
- e1000_calc_rx_da_crc(mac_addr));
+ ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
}
/* Write Rx addresses to the PHY */
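The replacement above leans on the kernel's ether_crc_le(), which is crc32_le(~0, data, len), i.e. the reflected CRC-32 with polynomial 0xEDB88320 and no final inversion; the leading ~ therefore reproduces the final inversion the deleted helper applied. A standalone check of that equivalence (the MAC address is arbitrary):

    #include <assert.h>
    #include <stdint.h>

    /* The deleted helper, restated: per-byte reflected CRC-32 over six
     * bytes, returning the inverted remainder. */
    static uint32_t old_calc_rx_da_crc(const uint8_t mac[6])
    {
        uint32_t poly = 0xEDB88320, crc = 0xffffffff;
        for (int i = 0; i < 6; i++) {
            crc ^= mac[i];
            for (int j = 8; j > 0; j--) {
                uint32_t mask = (crc & 1) * (uint32_t)-1;
                crc = (crc >> 1) ^ (poly & mask);
            }
        }
        return ~crc;
    }

    /* Userspace analogue of ether_crc_le(): same loop, no final invert. */
    static uint32_t ether_crc_le_like(int len, const uint8_t *p)
    {
        uint32_t crc = 0xffffffff;
        for (int i = 0; i < len; i++) {
            crc ^= p[i];
            for (int j = 0; j < 8; j++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x1c, 0xc9, 0x12, 0x34, 0x56 };
        assert(old_calc_rx_da_crc(mac) ==
               (uint32_t)~ether_crc_le_like(6, mac));
        return 0;
    }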
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u16 reg;
- u32 ctrl, icr, kab;
+ u32 ctrl, kab;
s32 ret_val;
/*
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CRC_OFFSET, 0x65656565);
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
kab = er32(KABGTXD);
kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
if (hw->phy.type == e1000_phy_82578) {
- hw->phy.ops.read_reg(hw, BM_WUC, &i);
+ e1e_rphy(hw, BM_WUC, &i);
ret_val = e1000_phy_hw_reset_ich8lan(hw);
if (ret_val)
return ret_val;
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_82577)) {
ew32(FCRTV_PCH, hw->fc.refresh_time);
- ret_val = hw->phy.ops.write_reg(hw,
- PHY_REG(BM_PORT_CTRL_PAGE, 27),
- hw->fc.pause_time);
+ ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
if (ret_val)
return ret_val;
}
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
break;
case e1000_phy_ife:
- ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
- &reg_data);
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
if (ret_val)
return ret_val;
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
reg_data |= IFE_PMC_AUTO_MDIX;
break;
}
- ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
- reg_data);
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
if (ret_val)
return ret_val;
break;
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
if (hw->phy.type == e1000_phy_ife)
return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ (IFE_PSCL_PROBE_MODE |
+ IFE_PSCL_PROBE_LEDS_OFF));
ew32(LEDCTL, hw->mac.ledctl_mode1);
return 0;
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_mode1);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}
/**
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
**/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_default);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}
/**
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
- hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+ e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
}
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170a601..ff2872153b2 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val =
+ e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242aa9e..fa5b6045254 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1980,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
}
/**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
* @adapter: address of board private structure
*
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is open.
**/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2005,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
}
/**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
* of the f/w this means that the network i/f is closed.
*
**/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2445,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
return;
}
@@ -2734,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
else
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable jumbo frame workaround mode\n");
}
/* Program MC offset vector base */
@@ -3184,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
}
-
/*
* flow control settings
*
@@ -3272,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* that the network interface is in control
*/
if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
ew32(WUC, 0);
@@ -3285,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(VET, ETH_P_8021Q);
e1000e_reset_adaptive(hw);
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state)) {
+ e1000_power_down_phy(adapter);
+ return;
+ }
+
e1000_get_phy_info(hw);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3579,7 @@ static int e1000_open(struct net_device *netdev)
* interface is now open and reset the part to a known state.
*/
if (adapter->flags & FLAG_HAS_AMT) {
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
e1000e_reset(adapter);
}
@@ -3634,7 +3643,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter);
err_setup_rx:
@@ -3689,8 +3698,9 @@ static int e1000_close(struct net_device *netdev)
* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_release_hw_control(adapter);
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
if ((adapter->flags & FLAG_HAS_ERT) ||
(adapter->hw.mac.type == e1000_pch2lan))
@@ -5209,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5366,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
return 0;
}
@@ -5613,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
}
@@ -5636,7 +5646,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strcpy(pba_str, "Unknown");
+ strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -5963,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
- strcpy(netdev->name, "eth%d");
+ strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -5982,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
err_eeprom:
if (!e1000_check_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
-
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
@@ -6053,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000e_reset_interrupt_capability(adapter);
kfree(adapter->tx_ring);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efeb55e..a640f1c369a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
/* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
out:
return ret_val;
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
if (phy->type == e1000_phy_82578) {
- ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
/* 82578 PHY - set the downshift count to 1x. */
phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
- ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
if (ret_val)
return ret_val;
}
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
- ret_val = e1e_wphy(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
+ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
if (ret_val)
return ret_val;
ret_val = e1000e_phy_reset_dsp(hw);
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
goto out;
/* Do not apply workaround if in PHY loopback bit 14 set */
- hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ e1e_rphy(hw, PHY_CONTROL, &data);
if (data & PHY_CONTROL_LB)
goto out;
/* check if link is up and at 1Gbps */
- ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
if (ret_val)
goto out;
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
mdelay(200);
/* flush the packets in the fifo buffer */
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
out:
return ret_val;
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
if (ret_val)
goto out;
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (ret_val)
goto out;
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
if (ret_val)
goto out;
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, length;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
goto out;
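
The e1000e hunks above replace open-coded calls through the hw->phy.ops table with the driver's e1e_rphy()/e1e_wphy() shorthands; behavior is unchanged, only the call sites get shorter. A minimal sketch of what such wrappers look like (the real definitions live in the driver's hw.h; treat this as illustrative, not the verbatim source):

/* illustrative accessor macros wrapping the PHY ops table */
#define e1e_rphy(hw, reg, data)	((hw)->phy.ops.read_reg((hw), (reg), (data)))
#define e1e_wphy(hw, reg, data)	((hw)->phy.ops.write_reg((hw), (reg), (data)))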
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d1450..6c7257bd73f 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0106"
+#define DRV_VERSION "EHEA_0107"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5bbe23..f75d3144b8a 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
}
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, i);
+ ehea_update_rq1a(pr->qp, i - 1);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
int ret;
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
- ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
- - init_attr->act_nr_rwqes_rq2
- - init_attr->act_nr_rwqes_rq3 - 1);
+ ehea_init_fill_rq1(pr, pr->rq1_skba.len);
ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175..2a71373719a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
*
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
@@ -45,29 +47,41 @@
#include <asm/cacheflush.h>
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif
#include "fec.h"
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define FEC_ALIGNMENT 0xf
#else
#define FEC_ALIGNMENT 0x3
#endif
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME "fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
-static unsigned char fec_mac_default[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+static struct platform_device_id fec_devtype[] = {
+ {
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ }
};
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
/*
* Some hardware gets its MAC address out of local flash memory.
* If this is non-zero then assume it is the address to get the MAC from.
@@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = {
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -186,7 +201,6 @@ struct fec_enet_private {
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
- int index;
int link;
int full_duplex;
struct completion mdio_done;
@@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev);
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static void *swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < (len + 3) / 4; i++, buf++)
+ *buf = cpu_to_be32(*buf);
+
+ return bufaddr;
+}
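
swap_buffer() byte-swaps the frame in place one 32-bit word at a time: on a little-endian CPU, cpu_to_be32() reverses the bytes of each word, and (len + 3) / 4 rounds up so a trailing partial word is covered as well (which assumes the buffer is word-aligned and padded out to a word boundary). A hedged illustration of the effect on a little-endian host:

/* illustrative only: an 8-byte, word-aligned buffer on a little-endian CPU */
u8 frame[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
swap_buffer(frame, sizeof(frame));
/* frame is now 44 33 22 11 88 77 66 55 - each 32-bit word reversed */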
+
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
@@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = fep->tx_bounce[index];
}
+ /*
+ * Some designs made an incorrect assumption about the endian mode of
+ * the system they run on. As a result, the driver has to swap every
+ * frame going to and coming from the controller.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, skb->len);
+
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
@@ -429,6 +464,8 @@ static void
fec_enet_rx(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev)
dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
DMA_FROM_DEVICE);
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
+
/* This does 16 byte alignment, exactly what we need.
* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
@@ -538,37 +578,50 @@ rx_processing_done:
}
/* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
unsigned char *iap, tmpaddr[ETH_ALEN];
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ /*
+ * Try to get the MAC address in the following order:
+ *
+ * 1) module parameter via kernel command line in form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+ /*
+ * 2) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ if (pdata)
+ memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+ }
+
+ /*
+ * 3) FEC MAC address registers set by the bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+ *((unsigned short *) &tmpaddr[4]) =
+ be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
memcpy(dev->dev_addr, iap, ETH_ALEN);
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+ /* Adjust MAC if using macaddr */
+ if (iap == macaddr)
+ dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}
-#endif
/* ------------------------------------------------------------------------- */
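
The rewritten fec_get_mac() tries each source in priority order and falls through whenever is_valid_ether_addr() rejects the current candidate (all-zero, broadcast, and multicast addresses all count as invalid). A condensed sketch of the pattern, with hypothetical helper names:

/* hypothetical sketch of the fallback chain above */
u8 addr[ETH_ALEN] = { 0 };

get_from_module_param(addr);		/* 1) fec.macaddr= on the command line */
if (!is_valid_ether_addr(addr))
	get_from_platform_data(addr);	/* 2) flash or fuse via pdata->mac */
if (!is_valid_ether_addr(addr))
	get_from_hw_registers(addr);	/* 3) whatever the bootloader programmed */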
@@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
- /* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
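
This hunk fixes a copy-and-paste bug: the MDIO write path was issuing a read opcode, so PHY register writes never took effect. The value written to FEC_MII_DATA is a standard IEEE 802.3 clause-22 management frame; a sketch of the field packing as I read the driver's macros (bit positions are illustrative, verify against fec.h):

/* illustrative clause-22 frame layout, most significant field first */
#define MDIO_ST		(1u << 30)		/* start of frame: 01 */
#define MDIO_OP_READ	(2u << 28)		/* opcode: read = 10 */
#define MDIO_OP_WRITE	(1u << 28)		/* opcode: write = 01 */
#define MDIO_PA(p)	(((p) & 0x1fu) << 23)	/* 5-bit PHY address */
#define MDIO_RA(r)	(((r) & 0x1fu) << 18)	/* 5-bit register address */
#define MDIO_TA		(2u << 16)		/* turnaround: 10 */
#define MDIO_DATA(d)	((d) & 0xffffu)		/* 16 data bits */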
@@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
+ int dev_id = fep->pdev->id;
fep->phy_dev = NULL;
@@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
continue;
if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
continue;
+ if (dev_id--)
+ continue;
strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
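
The added dev_id-- test makes the Nth FEC device adopt the Nth populated PHY on the bus: the counter decrements once per surviving candidate, and the loop stops skipping only after dev_id of them have passed. The idiom in isolation, with hypothetical names:

/* illustrative: pick the n-th element that survives the earlier filters */
int n = dev_id;
for (i = 0; i < PHY_MAX_ADDR; i++) {
	if (!candidate_ok(i))	/* stands in for the NULL/zero-id checks above */
		continue;
	if (n--)		/* skip the first dev_id good candidates */
		continue;
	break;			/* i now indexes the dev_id-th good candidate */
}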
@@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
static int fec_enet_mii_init(struct platform_device *pdev)
{
+ static struct mii_bus *fec0_mii_bus;
struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
int err = -ENXIO, i;
+ /*
+ * The dual fec interfaces are not equivalent with enet-mac.
+ * Here are the differences:
+ *
+ * - fec0 supports MII & RMII modes while fec1 only supports RMII
+ * - fec0 acts as the 1588 time master while fec1 is slave
+ * - external phys can only be configured by fec0
+ *
+ * That is to say, fec1 cannot work independently. It only works
+ * when fec0 is working. The reason behind this design is that the
+ * second interface is added primarily for Switch mode.
+ *
+ * Because of the last point above, both PHYs are attached to the fec0
+ * MDIO interface in the board design, and need to be configured via
+ * the fec0 mii_bus.
+ */
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+ /* fec1 uses fec0 mii_bus */
+ fep->mii_bus = fec0_mii_bus;
+ return 0;
+ }
+
fep->mii_timeout = 0;
/*
@@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ /* save fec0 mii_bus */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fec0_mii_bus = fep->mii_bus;
+
return 0;
err_out_free_mdio_irq:
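
The function-local static pointer is how fec1 borrows fec0's MDIO bus: fec0 registers the bus and stashes it in fec0_mii_bus, and the early return keeps fec1 from creating a second, non-working bus. The pattern in miniature (hypothetical names; note it relies on fec0 probing before fec1):

/* sketch of the bus-sharing pattern above */
static struct mii_bus *primary_bus;	/* survives across probe() calls */

if (is_secondary_device(pdev)) {
	priv->bus = primary_bus;	/* reuse the bus device 0 registered */
	return 0;
}
/* ... create and register the bus ... */
primary_bus = priv->bus;		/* publish it for the second device */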
@@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = {
/*
* XXX: We need to clean up on failure exits here.
*
- * index is only used in legacy code
*/
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev, int index)
spin_lock_init(&fep->hw_lock);
- fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
fep->netdev = dev;
- /* Set the Ethernet address */
-#ifdef CONFIG_M5272
+ /* Get the Ethernet address */
fec_get_mac(dev);
-#else
- {
- unsigned long l;
- l = readl(fep->hwp + FEC_ADDR_LOW);
- dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
- dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
- dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
- l = readl(fep->hwp + FEC_ADDR_HIGH);
- dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
- }
-#endif
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
int i;
+ u32 val, temp_mac[2];
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
+ /*
+ * enet-mac reset will reset the MAC address registers too,
+ * so we need to reconfigure them.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
+
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
@@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
- /* disable the gasket and wait */
- writel(0, fep->hwp + FEC_MIIGSK_ENR);
- while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
- udelay(1);
+ /*
+ * The PHY interface and speed need to be configured
+ * differently on the enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ val = readl(fep->hwp + FEC_R_CNTRL);
- /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+ /* MII or RMII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= (1 << 8);
+ else
+ val &= ~(1 << 8);
- /* re-enable the gasket */
- writel(2, fep->hwp + FEC_MIIGSK_ENR);
- }
+ /* 10M or 100M */
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+ val &= ~(1 << 9);
+ else
+ val |= (1 << 9);
+
+ writel(val, fep->hwp + FEC_R_CNTRL);
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
#endif
+ }
/* And last, enable the transmit and receive processing */
writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev)
}
clk_enable(fep->clk);
- ret = fec_enet_init(ndev, 0);
+ ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev)
if (ndev) {
fep = netdev_priv(ndev);
- if (netif_running(ndev))
- fec_enet_close(ndev);
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
+ }
clk_disable(fep->clk);
}
return 0;
@@ -1396,8 +1505,10 @@ fec_resume(struct device *dev)
if (ndev) {
fep = netdev_priv(ndev);
clk_enable(fep->clk);
- if (netif_running(ndev))
- fec_enet_open(ndev);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
+ }
}
return 0;
}
@@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = {
static struct platform_driver fec_driver = {
.driver = {
- .name = "fec",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &fec_pm_ops,
#endif
},
+ .id_table = fec_devtype,
.probe = fec_probe,
.remove = __devexit_p(fec_drv_remove),
};
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d..ace318df4c8 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
/****************************************************************************/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
/*
* Define the buffer descriptor structure.
*/
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d825d..af09296ef0d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
writel(flags, base + NvRegWakeUpFlags);
spin_unlock_irq(&np->lock);
}
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
return 0;
}
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* set mac address */
nv_copy_mac_to_hw(dev);
- /* Workaround current PCI init glitch: wakeup bits aren't
- * being set from PCI PM capability.
- */
- device_init_wakeup(&pci_dev->dev, 1);
-
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
+ device_set_wakeup_enable(&pci_dev->dev, false);
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
@@ -5746,8 +5743,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i <= np->register_size/sizeof(u32); i++)
np->saved_config_space[i] = readl(base + i*sizeof(u32));
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i, rc = 0;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* restore non-pci configuration space */
for (i = 0; i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev)
return rc;
}
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
static void nv_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev)
* only put the device into D3 if we really go for poweroff.
*/
if (system_state == SYSTEM_POWER_OFF) {
- if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
- pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+ pci_wake_from_d3(pdev, np->wolenabled);
pci_set_power_state(pdev, PCI_D3hot);
}
}
#else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
#define nv_shutdown NULL
-#define nv_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@ static struct pci_driver driver = {
.id_table = pci_tbl,
.probe = nv_probe,
.remove = __devexit_p(nv_remove),
- .suspend = nv_suspend,
- .resume = nv_resume,
.shutdown = nv_shutdown,
+ .driver.pm = NV_PM_OPS,
};
static int __init init_nic(void)
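
The forcedeth conversion drops the legacy pci_driver .suspend/.resume hooks in favor of dev_pm_ops: the PCI core now performs the save/restore, power-state, and wake-enable boilerplate deleted above, and SIMPLE_DEV_PM_OPS() wires the same callback pair into the hibernation entry points as well. Roughly what the macro expands to (simplified; the real definition is in linux/pm.h):

/* simplified expansion of SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume) */
static const struct dev_pm_ops nv_pm_ops = {
	.suspend  = nv_suspend,
	.resume   = nv_resume,
	.freeze   = nv_suspend,
	.thaw     = nv_resume,
	.poweroff = nv_suspend,
	.restore  = nv_resume,
};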
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a234..7d9ced0738c 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
while (p) {
if (p->bitrate == bitrate) {
memcpy(p->bits, bits, YAM_FPGA_SIZE);
- return p->bits;
+ goto out;
}
p = p->next;
}
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
p->bitrate = bitrate;
p->next = yam_data;
yam_data = p;
-
+ out:
release_firmware(fw);
return p->bits;
}
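
The yam.c change plugs a firmware-reference leak: the early return taken when a matching bitrate entry is already cached skipped release_firmware(), so both paths now funnel through a single out: label that releases the firmware exactly once. The single-exit cleanup idiom in miniature (hypothetical names around the real release_firmware() call):

/* the goto-single-exit idiom applied above */
struct entry *get_entry(int key)
{
	const struct firmware *fw = acquire_firmware();	/* hypothetical; must be released */
	struct entry *e = cache_lookup(key);		/* hypothetical cache */

	if (e)
		goto out;		/* early exit still reaches the cleanup */
	e = cache_insert(key);
out:
	release_firmware(fw);		/* now runs on every path */
	return e;
}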
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f08db4..e3b285a6773 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
#ifdef CONFIG_SIR_BFIN_DMA
struct dma_rx_buf {
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d..3b8c9246361 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue);
extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
- u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
- u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
- u8 l4type);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c227cd4..8d316d9cd29 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw ,"Flow Director previous command isn't complete, "
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
"aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Move the flexible bytes to use the ethertype - shift 6 words */
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
- u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ u32 key)
{
/*
* The algorithm is as follows:
@@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
* To simplify for programming, the algorithm is implemented
* in software this way:
*
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * hi_hash_dword[31:0] ^= Stream[351:320];
*
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- * if (int_key[i])
- * hash ^= int_stream[(i + 15):i];
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
+ *
*/
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
- union {
- u64 fill[6];
- u32 key[11];
- u8 key_stream[44];
- } tmp_key;
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
- u8 *stream = (u8 *)atr_input;
- u8 int_key[44]; /* upper-most bit unused */
- u8 hash_str[46]; /* upper-most 2 bits unused */
- u16 hash_result = 0;
- int i, j, k, h;
+ /* generate common hash dword */
+ for (i = 10; i; i -= 2)
+ common_hash_dword ^= atr_input->dword_stream[i] ^
+ atr_input->dword_stream[i - 1];
- /*
- * Initialize the fill member to prevent warnings
- * on some compilers
- */
- tmp_key.fill[0] = 0;
+ hi_hash_dword = ntohl(common_hash_dword);
- /* First load the temporary key stream */
- for (i = 0; i < 6; i++) {
- u64 fillkey = ((u64)key << 32) | key;
- tmp_key.fill[i] = fillkey;
- }
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- /*
- * Set the interim key for the hashing. Bit 352 is unused, so we must
- * shift and compensate when building the key.
- */
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- int_key[0] = tmp_key.key_stream[0] >> 1;
- for (i = 1, j = 0; i < 44; i++) {
- unsigned int this_key = tmp_key.key_stream[j] << 7;
- j++;
- int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
- }
-
- /*
- * Set the interim bit string for the hashing. Bits 368 and 367 are
- * unused, so shift and compensate when building the string.
- */
- hash_str[0] = (stream[40] & 0x7f) >> 1;
- for (i = 1, j = 40; i < 46; i++) {
- unsigned int this_str = stream[j] << 7;
- j++;
- if (j > 41)
- j = 0;
- hash_str[i] = (u8)(this_str | (stream[j] >> 1));
- }
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
/*
- * Now compute the hash. i is the index into hash_str, j is into our
- * key stream, k is counting the number of bits, and h interates within
- * each byte.
+ * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
+ * delayed because bit 0 of the stream should not be processed, so
+ * the vlan is not added until after bit 0 has been processed
*/
- for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
- for (h = 0; h < 8 && k < 351; h++, k++) {
- if (int_key[j] & (1 << h)) {
- /*
- * Key bit is set, XOR in the current 16-bit
- * string. Example of processing:
- * h = 0,
- * tmp = (hash_str[i - 2] & 0 << 16) |
- * (hash_str[i - 1] & 0xff << 8) |
- * (hash_str[i] & 0xff >> 0)
- * So tmp = hash_str[15 + k:k], since the
- * i + 2 clause rolls off the 16-bit value
- * h = 7,
- * tmp = (hash_str[i - 2] & 0x7f << 9) |
- * (hash_str[i - 1] & 0xff << 1) |
- * (hash_str[i] & 0x80 >> 7)
- */
- int tmp = (hash_str[i] >> h);
- tmp |= (hash_str[i - 1] << (8 - h));
- tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
- << (16 - h);
- hash_result ^= (u16)tmp;
- }
- }
- }
-
- return hash_result;
-}
-
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
- (src_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
- (src_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
- (dst_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
- (dst_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
- return 0;
-}
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
- input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
- *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
- *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *src_addr)
-{
- *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
- return 0;
-}
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *dst_addr)
-{
- *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
- return 0;
+ return hash_result & IXGBE_ATR_HASH_MASK;
}
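
The rewritten hash exploits the key's structure: XOR-folding the eleven input dwords into hi_hash_dword and keeping a 16-bit-rotated copy in lo_hash_dword means each of the 32 key bits selects its 16-bit window of the stream with a single shift, instead of walking all 351 bit positions. A hedged restatement outside the driver (omitting the VLAN/VM-pool ordering subtlety handled around bit 0 above):

/* illustrative fold-and-shift form of the 15-bit bucket hash */
static u16 atr_hash_sketch(const u32 *stream, int ndwords, u32 key)
{
	u32 hi = 0, lo, result = 0;
	int i;

	for (i = 0; i < ndwords; i++)
		hi ^= stream[i];		/* fold every dword together */
	lo = (hi >> 16) | (hi << 16);		/* word-swapped copy */

	for (i = 0; i < 16; i++) {
		if (key & (1u << i))
			result ^= lo >> i;	/* key bit i: window at bit i */
		if (key & (1u << (i + 16)))
			result ^= hi >> i;	/* key bit i+16: upper window */
	}
	return result & 0x7fff;			/* 15-bit hash mask */
}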
-/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *src_addr_1, u32 *src_addr_2,
- u32 *src_addr_3, u32 *src_addr_4)
-{
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
- return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15.
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
/**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
*
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
+ * This function is almost identical to the function above but contains
+ * several optimizations, such as unrolling all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
**/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
- u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
{
- *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
- *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
- return 0;
-}
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(input.dword);
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
- u16 *dst_port)
-{
- *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
- *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
- return 0;
-}
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 *flex_byte)
-{
- *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
- *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- return 0;
-}
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
- u8 *l4type)
-{
- *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
+ * delayed because bit 0 of the stream should not be processed, so
+ * the vlan is not added until after bit 0 has been processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
- return 0;
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
}
/**
* ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue)
{
u64 fdirhashcmd;
- u64 fdircmd;
- u32 fdirhash;
- u16 bucket_hash, sig_hash;
- u8 l4type;
-
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /* Get the l4type in order to program FDIRCMD properly */
- /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
/*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
- fdircmd |= IXGBE_FDIRCMD_IPV6;
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- fdirhashcmd = ((fdircmd << 32) | fdirhash);
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
return 0;
}
/**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects
+ * bit 2, and so on. In order to generate a correctly swapped value we
+ * need to bit swap the mask, and that is what this function accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+ u32 mask = ntohs(input_masks->dst_port_mask);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= ntohs(input_masks->src_port_mask);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
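
The mask/shift pairs implement the classic staged bit reversal: swap adjacent bits, then 2-bit pairs, then nibbles, then bytes, each stage using a mask of alternating groups. A 16-bit version of the same idiom, for checking the claim in the comment above:

/* illustrative 16-bit bit reversal using the same staged swaps */
static u16 bitrev16_sketch(u16 v)
{
	v = ((v & 0x5555) << 1) | ((v & 0xAAAA) >> 1);	/* adjacent bits */
	v = ((v & 0x3333) << 2) | ((v & 0xCCCC) >> 2);	/* 2-bit pairs */
	v = ((v & 0x0F0F) << 4) | ((v & 0xF0F0) >> 4);	/* nibbles */
	return (v << 8) | (v >> 8);			/* bytes */
}
/* bitrev16_sketch(0x0001) == 0x8000: bit 15 trades places with bit 0 */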
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result, on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
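
Taken together, ntohl() and IXGBE_STORE_AS_BE32() make the stored byte pattern host-independent: on little-endian the two swaps cancel and the register receives the original network-order bytes, while on big-endian ntohl() is a no-op and the single swap compensates for the little-endian register write. A worked example on a little-endian host (illustrative):

/* __be32 value 0xC0A80001 (192.168.0.1) sits in memory as C0 A8 00 01 */
__be32 ip = htonl(0xC0A80001);
u32 regval = IXGBE_STORE_AS_BE32(ntohl(ip));
/* ntohl() -> 0xC0A80001, swap -> 0x0100A8C0; writel() then emits
 * bytes C0 A8 00 01, the original big-endian pattern */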
+/**
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
- u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4 = 0, dst_ipv4 = 0;
- u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
- u16 src_port, dst_port, vlan_id, flex_bytes;
- u16 bucket_hash;
- u8 l4type;
- u8 fdirm = 0;
-
- /* Get our input values */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
+ u32 fdirport, fdirtcpm;
+ u32 fdirvlan;
+ /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+ u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+ IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
/*
- * Check l4type formatting, and bail out before we touch the hardware
+ * Check flow_type formatting, and bail out before we touch the hardware
* if there's a configuration issue
*/
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
- ixgbe_atr_get_src_port_82599(input, &src_port);
- ixgbe_atr_get_dst_port_82599(input, &dst_port);
- ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
- fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- /* Now figure out if we're IPv4 or IPv6 */
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- /* IPv6 */
- ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
- &src_ipv6_3, &src_ipv6_4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
- /* The last 4 bytes is the same register as IPv4 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
- fdircmd |= IXGBE_FDIRCMD_IPV6;
- fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
- } else {
- /* IPv4 */
- ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
- }
-
- ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
- (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
/*
- * Program the relevant mask registers. L4type cannot be
- * masked out in this implementation.
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+
+ /* Program FDIRM */
+ switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+ case 0xEFFF:
+ /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ case 0xE000:
+ /* Unmask VLAN prio - bit 1 */
+ fdirm &= ~IXGBE_FDIRM_VLANP;
break;
- case IXGBE_ATR_L4TYPE_UDP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ case 0x0FFF:
+ /* Unmask VLAN ID - bit 0 */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
break;
- default:
- /* this already would have failed above */
+ case 0x0000:
+ /* do nothing, vlans already masked */
break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
}
- /* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask)
- /* Mask both VLAN and VLANP - bits 0 and 1 */
- fdirm |= 0x3;
-
- if (input_masks->data_mask)
- /* Flex bytes need masking, so mask the whole thing - bit 4 */
- fdirm |= 0x10;
+ if (input_masks->flex_mask & 0xFFFF) {
+ if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* Unmask Flex Bytes - bit 4 */
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ }
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- fdirm |= 0x24;
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
- fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
- fdircmd |= IXGBE_FDIRCMD_LAST;
- fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_masks->src_ip_mask[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_masks->dst_ip_mask[0]);
+
+ /* Apply masks to input data */
+ input->formatted.vlan_id &= input_masks->vlan_id_mask;
+ input->formatted.flex_bytes &= input_masks->flex_mask;
+ input->formatted.src_port &= input_masks->src_port_mask;
+ input->formatted.dst_port &= input_masks->dst_port_mask;
+ input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+ input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+ /* record vlan (little-endian) and flex_bytes (big-endian) */
+ fdirvlan =
+ IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /* we only want the bucket hash so drop the upper 16 bits */
+ fdirhash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return 0;
}
+
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b39..2002ea88ca2 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
- reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+ ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
struct ethtool_rx_ntuple *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
- struct ixgbe_atr_input input_struct;
+ struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+ union ixgbe_atr_input input_struct;
struct ixgbe_atr_input_masks input_masks;
int target_queue;
+ int err;
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
* Don't allow programming if the action is a queue greater than
* the number of online Tx queues.
*/
- if ((fs.action >= adapter->num_tx_queues) ||
- (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ if ((fs->action >= adapter->num_tx_queues) ||
+ (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
return -EINVAL;
- memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
- input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
- input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
- input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
- input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
- input_masks.vlan_id_mask = fs.vlan_tag_mask;
- /* only use the lowest 2 bytes for flex bytes */
- input_masks.data_mask = (fs.data_mask & 0xffff);
-
- switch (fs.flow_type) {
+ /* record flow type */
+ switch (fs->flow_type) {
+ case IPV4_FLOW:
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
case TCP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
default:
return -1;
}
- /* Mask bits from the inputs based on user-supplied mask */
- ixgbe_atr_set_src_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
- ixgbe_atr_set_dst_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
- /* 82599 expects these to be byte-swapped for perfect filtering */
- ixgbe_atr_set_src_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
- ixgbe_atr_set_dst_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
-
- /* VLAN and Flex bytes are either completely masked or not */
- if (!fs.vlan_tag_mask)
- ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
-
- if (!input_masks.data_mask)
- /* make sure we only use the first 2 bytes of user data */
- ixgbe_atr_set_flex_byte_82599(&input_struct,
- (fs.data & 0xffff));
+ /* copy vlan tag minus the CFI bit */
+ if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+ input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+ if (!fs->vlan_tag_mask) {
+ input_masks.vlan_id_mask = htons(0xEFFF);
+ } else {
+ switch (~fs->vlan_tag_mask & 0xEFFF) {
+ /* all of these are valid vlan-mask values */
+ case 0xEFFF:
+ case 0xE000:
+ case 0x0FFF:
+ case 0x0000:
+ input_masks.vlan_id_mask =
+ htons(~fs->vlan_tag_mask);
+ break;
+ /* exit with error if vlan-mask is invalid */
+ default:
+ e_err(drv, "Partial VLAN ID or "
+ "priority mask in vlan-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+ }
+
+ /* make sure we only use the first 2 bytes of user data */
+ if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+ input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+ if (!(fs->data_mask & 0xFFFF)) {
+ input_masks.flex_mask = 0xFFFF;
+ } else if (~fs->data_mask & 0xFFFF) {
+ e_err(drv, "Partial user-def-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+
+ /*
+ * Copy input into formatted structures
+ *
+ * These assignments are based on the following logic:
+ * If neither the input nor the mask is set, assume the value is masked out.
+ * If the input is set but the mask is not, the mask defaults to accept all.
+ * If the input is not set but the mask is, the mask likely results in 0.
+ * If both the input and the mask are set, assign both.
+ */
+ if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+ input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+ if (!fs->m_u.tcp_ip4_spec.ip4src)
+ input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.src_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4src;
+ }
+ if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+ input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+ if (!fs->m_u.tcp_ip4_spec.ip4dst)
+ input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.dst_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4dst;
+ }
+ if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+ input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+ if (!fs->m_u.tcp_ip4_spec.psrc)
+ input_masks.src_port_mask = 0xFFFF;
+ else
+ input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+ }
+ if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+ input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+ if (!fs->m_u.tcp_ip4_spec.pdst)
+ input_masks.dst_port_mask = 0xFFFF;
+ else
+ input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+ }
/* determine if we need to drop or route the packet */
- if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
target_queue = MAX_RX_QUEUES - 1;
else
- target_queue = fs.action;
+ target_queue = fs->action;
spin_lock(&adapter->fdir_perfect_lock);
- ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
- &input_masks, 0, target_queue);
+ err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+ &input_struct,
+ &input_masks, 0,
+ target_queue);
spin_unlock(&adapter->fdir_perfect_lock);
- return 0;
+ return err ? -1 : 0;
}
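The mask handling above applies one rule per field. As a minimal sketch (not part of the patch), the four cases reduce to the helper below; fold_field() and its parameters are hypothetical names, only the logic mirrors ixgbe_set_rx_ntuple():

static void fold_field(u32 value, u32 user_mask, u32 *out_value, u32 *out_mask)
{
	/* neither value nor mask set: leave the field masked out */
	if (!value && !~user_mask)
		return;
	*out_value = value;
	if (!user_mask)
		*out_mask = 0xFFFFFFFF;	/* match the value exactly */
	else
		*out_mask = ~user_mask;	/* ethtool masks are inverted vs. hw */
}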
static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f819..a060610a42d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
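The loop above is the standard bounded register poll: write the register, then spin with a small delay until the bit clears or the budget runs out. A generic sketch of the pattern, with hypothetical read_reg() and MAX_POLL placeholders:

static bool wait_bit_cleared(u32 (*read_reg)(void), u32 bit)
{
	int tries = MAX_POLL;	/* e.g. 10 iterations of 10us, ~100us total */

	do {
		udelay(10);
	} while (--tries && (read_reg() & bit));

	return tries != 0;	/* false: bit did not clear within the budget */
}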
+
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
- rxdctl & ~IXGBE_RXDCTL_ENABLE);
- IXGBE_WRITE_FLUSH(hw);
+ ixgbe_disable_rx_queue(adapter, ring);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_FLUSH(hw);
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
msleep(10);
netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+ IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ e_err(probe,
+ "Flow Director is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- if (dev->features & NETIF_F_NTUPLE) {
- /* Flow Director perfect filter enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->atr_sample_rate = 0;
- spin_lock_init(&adapter->fdir_perfect_lock);
- } else {
- /* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 20;
- }
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
writel(i, tx_ring->tail);
}
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
{
- struct ixgbe_atr_input atr_input;
- struct iphdr *iph = ip_hdr(skb);
- struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+ union ixgbe_atr_hash_dword input = { .dword = 0 };
+ union ixgbe_atr_hash_dword common = { .dword = 0 };
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
struct tcphdr *th;
- u16 vlan_id;
+ __be16 vlan_id;
- /* Right now, we support IPv4 w/ TCP only */
- if (protocol != htons(ETH_P_IP) ||
- iph->protocol != IPPROTO_TCP)
+ /* if the ring doesn't have an interrupt vector, cannot perform ATR */
+ if (!q_vector)
return;
- memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+ /* do nothing if sampling is disabled */
+ if (!ring->atr_sample_rate)
+ return;
- vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
- IXGBE_TX_FLAGS_VLAN_SHIFT;
+ ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (protocol != __constant_htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
th = tcp_hdr(skb);
- ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
- ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
- ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
- ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
- /* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+ /* skip this packet since the socket is closing */
+ if (th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+ /*
+ * src and dst are inverted, think how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
+ input.formatted.vlan_id = vlan_id;
+
+ /*
+ * since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (vlan_id)
+ common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ else
+ common.port.src ^= th->dest ^ protocol;
+ common.port.dst ^= th->source;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ } else {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ }
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
- ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
}
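For reference, the XOR folding the comments above describe compresses an arbitrarily wide address pair into the single common.ip dword. A sketch of just the IPv6 fold, using the s6_addr32 accessors exactly as the code does; ipv6_fold() itself is a hypothetical name:

static __be32 ipv6_fold(const struct in6_addr *src, const struct in6_addr *dst)
{
	return src->s6_addr32[0] ^ src->s6_addr32[1] ^
	       src->s6_addr32[2] ^ src->s6_addr32[3] ^
	       dst->s6_addr32[0] ^ dst->s6_addr32[1] ^
	       dst->s6_addr32[2] ^ dst->s6_addr32[3];
}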
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
- if (tx_ring->atr_sample_rate) {
- ++tx_ring->atr_count;
- if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &tx_ring->state)) {
- ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags, protocol);
- tx_ring->atr_count = 0;
- }
- }
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len;
txq->tx_packets++;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467d3c..fd3358f5413 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L3P 0x00000008
-#define IXGBE_FDIRM_L4P 0x00000010
-#define IXGBE_FDIRM_FLEX 0x00000020
-#define IXGBE_FDIRM_DIPv6 0x00000040
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer;
#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET 0
-#define IXGBE_ATR_SRC_IPV6_OFFSET 2
-#define IXGBE_ATR_SRC_IPV4_OFFSET 14
-#define IXGBE_ATR_DST_IPV6_OFFSET 18
-#define IXGBE_ATR_DST_IPV4_OFFSET 30
-#define IXGBE_ATR_SRC_PORT_OFFSET 34
-#define IXGBE_ATR_DST_PORT_OFFSET 36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET 40
-#define IXGBE_ATR_L4TYPE_OFFSET 41
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
- /* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
*
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
* vlan_id - 2 bytes
* src_ip - 16 bytes
* dst_ip - 16 bytes
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * vm_pool - 1 byte
- * l4type - 1 byte
+ * rsvd0 - 2 bytes - space reserved must be 0.
*/
- u8 byte_stream[42];
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 rsvd0;
+ } formatted;
+ __be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
};
struct ixgbe_atr_input_masks {
- u32 src_ip_mask;
- u32 dst_ip_mask;
- u16 src_port_mask;
- u16 dst_port_mask;
- u16 vlan_id_mask;
- u16 data_mask;
+ __be16 rsvd0;
+ __be16 vlan_id_mask;
+ __be32 dst_ip_mask[4];
+ __be32 src_ip_mask[4];
+ __be16 src_port_mask;
+ __be16 dst_port_mask;
+ __be16 flex_mask;
};
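Because formatted and dword_stream alias the same 44 bytes, hashing code can treat the whole input as eleven dwords. A minimal sketch, assuming a hypothetical hash_dword() mixing step:

static u32 hash_atr_input(const union ixgbe_atr_input *in)
{
	u32 hash = 0;
	int i;

	/* 44 bytes of formatted input == 11 big-endian dwords */
	for (i = 0; i < 11; i++)
		hash = hash_dword(hash, be32_to_cpu(in->dword_stream[i]));

	return hash;
}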
enum ixgbe_eeprom_type {
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c1..3a4277f6fac 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
} else {
int i;
+ buf->direct.buf = NULL;
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
buf->direct.map);
else {
- if (BITS_PER_LONG == 64)
+ if (BITS_PER_LONG == 64 && buf->direct.buf)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e..897f576b8b1 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int i;
int err;
- dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+ dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+ prof->tx_ring_num, prof->rx_ring_num);
if (dev == NULL) {
mlx4_err(mdev, "Net device allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278..5de1db89783 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
- if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
- mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
- }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c158910f7e..e953793a33f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481b..a1b82c9c67d 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
data = ap->tpkt->data;
count = ap->tpkt->len;
fcs = ap->tfcs;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/*
* LCP packets with code values between 1 (configure-request)
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
code = data[0];
if (code != CONFACK && code != CONFREQ)
return;
- dlen = (data[2] << 8) + data[3];
+ dlen = get_unaligned_be16(data + 2);
if (len < dlen)
return; /* packet got truncated or length is bogus */
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) {
case LCP_MRU:
- val = (data[2] << 8) + data[3];
+ val = get_unaligned_be16(data + 2);
if (inbound)
ap->mru = val;
else
ap->chan.mtu = val;
break;
case LCP_ASYNCMAP:
- val = (data[2] << 24) + (data[3] << 16)
- + (data[4] << 8) + data[5];
+ val = get_unaligned_be32(data + 2);
if (inbound)
ap->raccm = val;
else
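The conversions in this file (and in the ppp_deflate.c, ppp_generic.c, ppp_mppe.c and ppp_synctty.c hunks that follow) all trade open-coded big-endian loads for the <asm/unaligned.h> helpers. The two forms compute the same value; a sketch, assuming data points into an unsigned char buffer:

static void show_equivalence(const unsigned char *data)
{
	u16 proto_open_coded = (data[0] << 8) + data[1];
	u16 proto_helper     = get_unaligned_be16(data);	/* same value */

	u32 accm_open_coded  = (data[2] << 24) + (data[3] << 16)
			     + (data[4] << 8) + data[5];
	u32 accm_helper      = get_unaligned_be32(data + 2);	/* same value */
}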
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cf..43583309a65 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
#include <linux/ppp-comp.h>
#include <linux/zlib.h>
+#include <asm/unaligned.h>
/*
* State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
*/
wptr[0] = PPP_ADDRESS(rptr);
wptr[1] = PPP_CONTROL(rptr);
- wptr[2] = PPP_COMP >> 8;
- wptr[3] = PPP_COMP;
+ put_unaligned_be16(PPP_COMP, wptr + 2);
wptr += PPP_HDRLEN;
- wptr[0] = state->seqno >> 8;
- wptr[1] = state->seqno;
+ put_unaligned_be16(state->seqno, wptr);
wptr += DEFLATE_OVHD;
olen = PPP_HDRLEN + DEFLATE_OVHD;
state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
}
/* Check the sequence number. */
- seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+ seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
if (seq != (state->seqno & 0xffff)) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484c029..c7a6c446697 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
@@ -210,7 +211,7 @@ struct ppp_net {
};
/* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
pp = skb_push(skb, 2);
proto = npindex_to_proto[npi];
- pp[0] = proto >> 8;
- pp[1] = proto;
+ put_unaligned_be16(proto, pp);
netif_stop_queue(dev);
skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
q = skb_put(frag, flen + hdrlen);
/* make the MP header */
- q[0] = PPP_MP >> 8;
- q[1] = PPP_MP;
+ put_unaligned_be16(PPP_MP, q);
if (ppp->flags & SC_MP_XSHORTSEQ) {
q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3..9a1849a83e2 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
#include "ppp_mppe.h"
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
*/
obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf);
- obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */
- obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
+ put_unaligned_be16(PPP_COMP, obuf + 2);
obuf += PPP_HDRLEN;
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (state->debug >= 7)
printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
state->ccount);
- obuf[0] = state->ccount >> 8;
- obuf[1] = state->ccount & 0xff;
+ put_unaligned_be16(state->ccount, obuf);
if (!state->stateful || /* stateless mode */
((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d..4e6b72f57de 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
int islcp;
data = skb->data;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/* LCP packets with codes between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d204d..44e316fd67b 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 14
-#define QLCNIC_LINUX_VERSIONID "5.0.14"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID "5.0.15"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@ struct uni_data_desc{
u32 reserved[5];
};
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FW_IMAGE_REGION 0x74
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_add;
+};
+
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af709d39..4c14510e2a8 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[1])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c39d3e..a7f1d5b7e81 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0;
}
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+ if (flt_entry == NULL) {
+ dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ return -EIO;
+ }
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
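The flash layout table read above is simply a qlcnic_flt_header followed by a packed array of qlcnic_flt_entry records; the scan walks that array looking for the requested region. A sketch of the entry count the loop bound encodes:

static u32 flt_entry_count(const struct qlcnic_flt_header *hdr)
{
	/* hdr->len covers the header itself plus all entries */
	return (hdr->len - sizeof(*hdr)) / sizeof(struct qlcnic_flt_entry);
}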
+
int
qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_flt_entry fw_entry;
u32 ver = -1, min_ver;
+ int ret;
- qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+ if (!ret)
+ /* bytes 0-4: signature, bytes 4-8: fw version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
ver = QLCNIC_DECODE_VERSION(ver);
min_ver = QLCNIC_MIN_FW_VERSION;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46c091..37c04b4fade 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20f64c..bb8645ab247 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1632,36 +1632,134 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
{
__le32 *phytable = (__le32 *)fw->data;
struct net_device *dev = tp->dev;
- size_t i;
+ size_t index, fw_size = fw->size / sizeof(*phytable);
+ u32 predata, count;
if (fw->size % sizeof(*phytable)) {
netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
return;
}
- for (i = 0; i < fw->size / sizeof(*phytable); i++) {
- u32 action = le32_to_cpu(phytable[i]);
+ for (index = 0; index < fw_size; index++) {
+ u32 action = le32_to_cpu(phytable[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
- if (!action)
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_READ_EFUSE:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
break;
- if ((action & 0xf0000000) != PHY_WRITE) {
- netif_err(tp, probe, dev,
- "unknown action 0x%08x\n", action);
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ netif_err(tp, probe, tp->dev,
+ "Invalid action 0x%08x\n", action);
return;
}
}
- while (i-- != 0) {
- u32 action = le32_to_cpu(*phytable);
+ predata = 0;
+ count = 0;
+
+ for (index = 0; index < fw_size; ) {
+ u32 action = le32_to_cpu(phytable[index]);
u32 data = action & 0x0000ffff;
- u32 reg = (action & 0x0fff0000) >> 16;
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ if (!action)
+ break;
switch (action & 0xf0000000) {
+ case PHY_READ:
+ predata = rtl_readphy(tp, regno);
+ count++;
+ index++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ index++;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ index++;
+ break;
+ case PHY_BJMPN:
+ index -= regno;
+ break;
+ case PHY_READ_EFUSE:
+ predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ index++;
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ index++;
+ break;
case PHY_WRITE:
- rtl_writephy(tp, reg, data);
- phytable++;
+ rtl_writephy(tp, regno, data);
+ index++;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (count == data)
+ index += 2;
+ else
+ index += 1;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ index++;
break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ index++;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ rtl_writephy(tp, regno, predata);
+ index++;
+ break;
+ case PHY_SKIPN:
+ index += regno + 1;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ index++;
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
default:
BUG();
}
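The rewritten loader is a small two-pass bytecode interpreter: the first loop rejects any firmware whose jump or skip targets fall outside the table, and only then does the second loop execute, so a malformed blob can never move index out of range. A minimal sketch of that structure, with hypothetical valid_at()/execute_at() helpers:

static int run_phy_fw(const u32 *insn, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)		/* pass 1: validate every action */
		if (!valid_at(insn, n, i))
			return -EINVAL;

	for (i = 0; i < n; )		/* pass 2: execute; each op advances i */
		i = execute_at(insn, i);

	return 0;
}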
@@ -3069,15 +3167,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- rtl8169_init_phy(dev, tp);
-
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
-
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
if (pci_dev_run_wake(pdev))
@@ -3127,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
static int rtl8169_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
@@ -3162,6 +3252,15 @@ static int rtl8169_open(struct net_device *dev)
napi_enable(&tp->napi);
+ rtl8169_init_phy(dev, tp);
+
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
rtl_pll_power_up(tp);
rtl_hw_start(dev);
@@ -3171,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev)
tp->saved_wolopts = 0;
pm_runtime_put_noidle(&pdev->dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ rtl8169_check_link_status(dev, tp, ioaddr);
out:
return retval;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf3b24..7d85a38377a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
#include <asm/irq.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
#include "sky2.h"
#define DRV_NAME "sky2"
@@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
+
+static void sky2_vlan_mode(struct net_device *dev)
{
- if (onoff) {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+
+ if (dev->features & NETIF_F_HW_VLAN_RX)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_VLAN_TAG_ON);
- } else {
+ else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
+
+ dev->vlan_features = dev->features & ~NETIF_F_ALL_VLAN;
+ if (dev->features & NETIF_F_HW_VLAN_TX)
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_ON);
+ else {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
+
+ /* Can't do transmit offload of vlan without hw vlan */
+ dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+ | NETIF_F_ALL_CSUM);
}
}
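sky2_vlan_mode() derives the hardware strip/insert state entirely from dev->features, so device bring-up (sky2_hw_up) and ethtool flag changes (sky2_set_flags, further below) share one path. A condensed sketch of the reprogram-only-on-change pattern used there:

static void example_features_changed(struct net_device *dev,
				     unsigned long old_feat)
{
	unsigned long changed = old_feat ^ dev->features;

	if (changed & NETIF_F_ALL_VLAN)
		sky2_vlan_mode(dev);	/* re-derive strip/insert state */
}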
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
- u16 port = sky2->port;
-
- netif_tx_lock_bh(dev);
- napi_disable(&hw->napi);
-
- sky2->vlgrp = grp;
- sky2_set_vlan_mode(hw, port, grp != NULL);
-
- sky2_read32(hw, B0_Y2_SP_LISR);
- napi_enable(&hw->napi);
- netif_tx_unlock_bh(dev);
-}
-#endif
-
/* Amount of required worst case padding in rx buffer */
static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
{
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
sky2->tx_ring_size - 1);
-#ifdef SKY2_VLAN_TAG_USED
- sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+ sky2_vlan_mode(sky2->netdev);
sky2_rx_start(sky2);
}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
}
ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
if (vlan_tx_tag_present(skb)) {
if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->length = cpu_to_be16(vlan_tx_tag_get(skb));
ctrl |= INS_VLAN;
}
-#endif
/* Handle TCP checksum offload */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16;
-#ifdef SKY2_VLAN_TAG_USED
- /* Account for vlan tag */
- if (sky2->vlgrp && (status & GMR_FS_VLAN))
- count -= VLAN_HLEN;
-#endif
+ if (status & GMR_FS_VLAN)
+ count -= VLAN_HLEN; /* Account for vlan tag */
netif_printk(sky2, rx_status, KERN_DEBUG, dev,
"rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static inline void sky2_skb_rx(const struct sky2_port *sky2,
u32 status, struct sk_buff *skb)
{
-#ifdef SKY2_VLAN_TAG_USED
- u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
- if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
- if (skb->ip_summed == CHECKSUM_NONE)
- vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
- else
- vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
- vlan_tag, skb);
- return;
- }
-#endif
+ if (status & GMR_FS_VLAN)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
if (skb->ip_summed == CHECKSUM_NONE)
netif_receive_skb(skb);
else
@@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
goto exit_loop;
break;
-#ifdef SKY2_VLAN_TAG_USED
case OP_RXVLAN:
sky2->rx_tag = length;
break;
@@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXCHKSVLAN:
sky2->rx_tag = length;
/* fall through */
-#endif
case OP_RXCHKS:
if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
| SKY2_HW_NEW_LE
| SKY2_HW_AUTO_TX_SUM
| SKY2_HW_ADV_POWER_CTL;
+
+ /* The FE+ workaround for status conflicts with VLAN tag detection. */
+ if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ hw->flags |= SKY2_HW_VLAN_BROKEN;
break;
case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
u32 modes = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_TP;
+ | SUPPORTED_100baseT_Full;
if (hw->flags & SKY2_HW_GIGABIT)
modes |= SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full;
return modes;
} else
- return SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_FIBRE;
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
}
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (sky2_is_copper(hw)) {
ecmd->port = PORT_TP;
ecmd->speed = sky2->speed;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
u32 supported = sky2_supported_modes(hw);
if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ~supported)
+ return -EINVAL;
+
+ if (sky2_is_copper(hw))
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ else
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+
sky2->flags |= SKY2_FLAG_AUTO_SPEED;
- ecmd->advertising = supported;
sky2->duplex = -1;
sky2->speed = -1;
} else {
@@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
- sky2->advertising = ecmd->advertising;
-
if (netif_running(dev)) {
sky2_phy_reinit(sky2);
sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
static int sky2_set_flags(struct net_device *dev, u32 data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 supported =
- (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+ unsigned long old_feat = dev->features;
+ u32 supported = 0;
int rc;
+ if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+ supported |= ETH_FLAG_RXHASH;
+
+ if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+ supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+ printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+ supported, data);
+
rc = ethtool_op_set_flags(dev, data, supported);
if (rc)
return rc;
- rx_set_rss(dev);
+ if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+ rx_set_rss(dev);
+
+ if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+ sky2_vlan_mode(dev);
return 0;
}
@@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
.set_flags = sky2_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sky2_netpoll,
#endif
@@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
},
};
@@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
- | NETIF_F_TSO | NETIF_F_GRO;
+ | NETIF_F_TSO | NETIF_F_GRO;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
@@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (!(hw->flags & SKY2_HW_RSS_BROKEN))
dev->features |= NETIF_F_RXHASH;
-#ifdef SKY2_VLAN_TAG_USED
- /* The workaround for FE+ status conflicts with VLAN tag detection. */
- if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
- sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- }
-#endif
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc404f1e..6861b0e8db9 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@ struct sky2_port {
u16 rx_pending;
u16 rx_data_size;
u16 rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
- struct vlan_group *vlgrp;
-#endif
+
struct {
unsigned long last;
u32 mac_rp;
@@ -2284,6 +2281,7 @@ struct sky2_hw {
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
#define SKY2_HW_RSS_BROKEN 0x00000100
+#define SKY2_HW_VLAN_BROKEN 0x00000200
u8 chip_id;
u8 chip_rev;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9f83e..546de574982 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) ||
- netif_needs_gso(dev, skb))) {
+ netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irq(&np->tx_lock);
goto drop;
}
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c11c1..50cb1e1b4a1 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
-ps3av_mod-objs += ps3av.o ps3av_cmd.o
+ps3av_mod-y := ps3av.o ps3av_cmd.o
obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cbabb3..9583cbcc6b7 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "rtc-core.h"
@@ -152,6 +153,18 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
spin_lock_init(&rtc->irq_task_lock);
init_waitqueue_head(&rtc->irq_queue);
+ /* Init timerqueue */
+ timerqueue_init_head(&rtc->timerqueue);
+ INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+ /* Init aie timer */
+ rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
+ /* Init uie timer */
+ rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
+ /* Init pie timer */
+ hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rtc->pie_timer.function = rtc_pie_update_irq;
+ rtc->pie_enabled = 0;
+
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa..90384b9f6b2 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,11 @@
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/log2.h>
+#include <linux/workqueue.h>
-int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
int err;
-
- err = mutex_lock_interruptible(&rtc->ops_lock);
- if (err)
- return err;
-
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->read_time)
@@ -31,7 +27,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
memset(tm, 0, sizeof(struct rtc_time));
err = rtc->ops->read_time(rtc->dev.parent, tm);
}
+ return err;
+}
+
+int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+ int err;
+ err = mutex_lock_interruptible(&rtc->ops_lock);
+ if (err)
+ return err;
+
+ err = __rtc_read_time(rtc, tm);
mutex_unlock(&rtc->ops_lock);
return err;
}
@@ -106,188 +113,54 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
int err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
-
- if (rtc->ops == NULL)
- err = -ENODEV;
- else if (!rtc->ops->read_alarm)
- err = -EINVAL;
- else {
- memset(alarm, 0, sizeof(struct rtc_wkalrm));
- err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
- }
-
+ alarm->enabled = rtc->aie_timer.enabled;
+ if (alarm->enabled)
+ alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
mutex_unlock(&rtc->ops_lock);
- return err;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(rtc_read_alarm);
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+ struct rtc_time tm;
+ long now, scheduled;
int err;
- struct rtc_time before, now;
- int first_time = 1;
- unsigned long t_now, t_alm;
- enum { none, day, month, year } missing = none;
- unsigned days;
-
- /* The lower level RTC driver may return -1 in some fields,
- * creating invalid alarm->time values, for reasons like:
- *
- * - The hardware may not be capable of filling them in;
- * many alarms match only on time-of-day fields, not
- * day/month/year calendar data.
- *
- * - Some hardware uses illegal values as "wildcard" match
- * values, which non-Linux firmware (like a BIOS) may try
- * to set up as e.g. "alarm 15 minutes after each hour".
- * Linux uses only oneshot alarms.
- *
- * When we see that here, we deal with it by using values from
- * a current RTC timestamp for any missing (-1) values. The
- * RTC driver prevents "periodic alarm" modes.
- *
- * But this can be racey, because some fields of the RTC timestamp
- * may have wrapped in the interval since we read the RTC alarm,
- * which would lead to us inserting inconsistent values in place
- * of the -1 fields.
- *
- * Reading the alarm and timestamp in the reverse sequence
- * would have the same race condition, and not solve the issue.
- *
- * So, we must first read the RTC timestamp,
- * then read the RTC alarm value,
- * and then read a second RTC timestamp.
- *
- * If any fields of the second timestamp have changed
- * when compared with the first timestamp, then we know
- * our timestamp may be inconsistent with that used by
- * the low-level rtc_read_alarm_internal() function.
- *
- * So, when the two timestamps disagree, we just loop and do
- * the process again to get a fully consistent set of values.
- *
- * This could all instead be done in the lower level driver,
- * but since more than one lower level RTC implementation needs it,
- * then it's probably best best to do it here instead of there..
- */
- /* Get the "before" timestamp */
- err = rtc_read_time(rtc, &before);
- if (err < 0)
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
return err;
- do {
- if (!first_time)
- memcpy(&before, &now, sizeof(struct rtc_time));
- first_time = 0;
-
- /* get the RTC alarm values, which may be incomplete */
- err = rtc_read_alarm_internal(rtc, alarm);
- if (err)
- return err;
- if (!alarm->enabled)
- return 0;
-
- /* full-function RTCs won't have such missing fields */
- if (rtc_valid_tm(&alarm->time) == 0)
- return 0;
-
- /* get the "after" timestamp, to detect wrapped fields */
- err = rtc_read_time(rtc, &now);
- if (err < 0)
- return err;
-
- /* note that tm_sec is a "don't care" value here: */
- } while ( before.tm_min != now.tm_min
- || before.tm_hour != now.tm_hour
- || before.tm_mon != now.tm_mon
- || before.tm_year != now.tm_year);
-
- /* Fill in the missing alarm fields using the timestamp; we
- * know there's at least one since alarm->time is invalid.
- */
- if (alarm->time.tm_sec == -1)
- alarm->time.tm_sec = now.tm_sec;
- if (alarm->time.tm_min == -1)
- alarm->time.tm_min = now.tm_min;
- if (alarm->time.tm_hour == -1)
- alarm->time.tm_hour = now.tm_hour;
-
- /* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
- alarm->time.tm_mday = now.tm_mday;
- missing = day;
- }
- if (alarm->time.tm_mon == -1) {
- alarm->time.tm_mon = now.tm_mon;
- if (missing == none)
- missing = month;
- }
- if (alarm->time.tm_year == -1) {
- alarm->time.tm_year = now.tm_year;
- if (missing == none)
- missing = year;
- }
-
- /* with luck, no rollover is needed */
- rtc_tm_to_time(&now, &t_now);
- rtc_tm_to_time(&alarm->time, &t_alm);
- if (t_now < t_alm)
- goto done;
-
- switch (missing) {
+ rtc_tm_to_time(&alarm->time, &scheduled);
- /* 24 hour rollover ... if it's now 10am Monday, an alarm that
- * that will trigger at 5am will do so at 5am Tuesday, which
- * could also be in the next month or year. This is a common
- * case, especially for PCs.
- */
- case day:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
- t_alm += 24 * 60 * 60;
- rtc_time_to_tm(t_alm, &alarm->time);
- break;
-
- /* Month rollover ... if it's the 31th, an alarm on the 3rd will
- * be next month. An alarm matching on the 30th, 29th, or 28th
- * may end up in the month after that! Many newer PCs support
- * this type of alarm.
+ /* Make sure we're not setting alarms in the past */
+ err = __rtc_read_time(rtc, &tm);
+ rtc_tm_to_time(&tm, &now);
+ if (scheduled <= now)
+ return -ETIME;
+ /*
+ * XXX - We just checked that the alarm time is not in the
+ * past, but there is still a race window: if the alarm is
+ * set for the next second, the second may tick over right
+ * here, before we actually set the alarm.
*/
- case month:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
- do {
- if (alarm->time.tm_mon < 11)
- alarm->time.tm_mon++;
- else {
- alarm->time.tm_mon = 0;
- alarm->time.tm_year++;
- }
- days = rtc_month_days(alarm->time.tm_mon,
- alarm->time.tm_year);
- } while (days < alarm->time.tm_mday);
- break;
-
- /* Year rollover ... easy except for leap years! */
- case year:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
- do {
- alarm->time.tm_year++;
- } while (rtc_valid_tm(&alarm->time) != 0);
- break;
-
- default:
- dev_warn(&rtc->dev, "alarm rollover not handled\n");
- }
-done:
- return 0;
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(rtc_read_alarm);
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
@@ -300,16 +173,18 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
-
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+ if (rtc->aie_timer.enabled) {
+ rtc_timer_remove(rtc, &rtc->aie_timer);
+ rtc->aie_timer.enabled = 0;
+ }
+ rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+ rtc->aie_timer.period = ktime_set(0, 0);
+ if (alarm->enabled) {
+ rtc->aie_timer.enabled = 1;
+ rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ }
mutex_unlock(&rtc->ops_lock);
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
@@ -319,6 +194,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (err)
return err;
+ if (rtc->aie_timer.enabled != enabled) {
+ if (enabled) {
+ rtc->aie_timer.enabled = 1;
+ rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ } else {
+ rtc_timer_remove(rtc, &rtc->aie_timer);
+ rtc->aie_timer.enabled = 0;
+ }
+ }
+
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->alarm_irq_enable)
@@ -337,52 +222,53 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (err)
return err;
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- if (enabled == 0 && rtc->uie_irq_active) {
- mutex_unlock(&rtc->ops_lock);
- return rtc_dev_update_irq_enable_emul(rtc, enabled);
+ /* make sure we're changing state */
+ if (rtc->uie_rtctimer.enabled == enabled)
+ goto out;
+
+ if (enabled) {
+ struct rtc_time tm;
+ ktime_t now, onesec;
+
+ __rtc_read_time(rtc, &tm);
+ onesec = ktime_set(1, 0);
+ now = rtc_tm_to_ktime(tm);
+ rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+ rtc->uie_rtctimer.period = ktime_set(1, 0);
+ rtc->uie_rtctimer.enabled = 1;
+ rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+ } else {
+ rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+ rtc->uie_rtctimer.enabled = 0;
}
-#endif
-
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->update_irq_enable)
- err = -EINVAL;
- else
- err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
+out:
mutex_unlock(&rtc->ops_lock);
-
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- /*
- * Enable emulation if the driver did not provide
- * the update_irq_enable function pointer or if returned
- * -EINVAL to signal that it has been configured without
- * interrupts or that are not available at the moment.
- */
- if (err == -EINVAL)
- err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
return err;
+
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
+
/**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
- * @rtc: the rtc device
- * @num: how many irqs are being reported (usually one)
- * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
- * Context: any
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occurred (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
*/
-void rtc_update_irq(struct rtc_device *rtc,
- unsigned long num, unsigned long events)
+static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
unsigned long flags;
+ /* mark one irq of the appropriate mode */
spin_lock_irqsave(&rtc->irq_lock, flags);
- rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
+ rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
spin_unlock_irqrestore(&rtc->irq_lock, flags);
+ /* call the task func */
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task)
rtc->irq_task->func(rtc->irq_task->private_data);
@@ -391,6 +277,69 @@ void rtc_update_irq(struct rtc_device *rtc,
wake_up_interruptible(&rtc->irq_queue);
kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+ struct rtc_device *rtc = (struct rtc_device *)private;
+ rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+ struct rtc_device *rtc = (struct rtc_device *)private;
+ rtc_handle_legacy_irq(rtc, 1, RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+ struct rtc_device *rtc;
+ ktime_t period;
+ int count;
+ rtc = container_of(timer, struct rtc_device, pie_timer);
+
+ period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ count = hrtimer_forward_now(timer, period);
+
+ rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+ return HRTIMER_RESTART;
+}
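
A worked example of the forwarding above (numbers illustrative): at irq_freq = 8 Hz the period is NSEC_PER_SEC/8 = 125 ms; if the callback runs 300 ms after the programmed expiry, hrtimer_forward_now() advances the timer three whole periods and returns 3, so three RTC_PF events are reported at once.

    ktime_t period = ktime_set(0, NSEC_PER_SEC / 8);   /* 125 ms */
    int count = hrtimer_forward_now(timer, period);    /* 3 when 300 ms late */
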
+
+/**
+ * rtc_update_irq - Called when an RTC interrupt occurs
+ * @rtc: the rtc device
+ * @num: how many irqs are being reported (usually one)
+ * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
+ * Context: any
+ */
+void rtc_update_irq(struct rtc_device *rtc,
+ unsigned long num, unsigned long events)
+{
+ schedule_work(&rtc->irqwork);
+}
EXPORT_SYMBOL_GPL(rtc_update_irq);
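
The external contract is unchanged: a driver ISR still reports events the same way, though with this patch the arguments are effectively ignored and the real event accounting happens in rtc_timer_do_work(). A hypothetical ISR:

    static irqreturn_t my_rtc_isr(int irq, void *dev_id)
    {
            struct rtc_device *rtc = dev_id;

            rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
            return IRQ_HANDLED;
    }
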
static int __rtc_match(struct device *dev, void *data)
@@ -477,18 +426,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
int err = 0;
unsigned long flags;
- if (rtc->ops->irq_set_state == NULL)
- return -ENXIO;
-
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
- if (err == 0)
- err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+ if (enabled) {
+ ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&rtc->pie_timer);
+ }
+ rtc->pie_enabled = enabled;
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
return err;
}
@@ -509,21 +460,194 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
int err = 0;
unsigned long flags;
- if (rtc->ops->irq_set_freq == NULL)
- return -ENXIO;
-
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
if (err == 0) {
- err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
- if (err == 0)
- rtc->irq_freq = freq;
+ rtc->irq_freq = freq;
+ if (rtc->pie_enabled) {
+ ktime_t period;
+ hrtimer_cancel(&rtc->pie_timer);
+ period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ hrtimer_start(&rtc->pie_timer, period,
+ HRTIMER_MODE_REL);
+ }
}
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
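
Caller sketch (hypothetical, assuming a task already registered through rtc_irq_register()): retuning the emulated PIE rate now just restarts the hrtimer with the new period.

    err = rtc_irq_set_freq(rtc, task, 16);   /* 16 Hz -> 62.5 ms period */
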
+
+/**
+ * rtc_timer_enqueue - Adds an rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added.
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of the timerqueue.
+ */
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+ if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(timer->node.expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ schedule_work(&rtc->irqwork);
+ }
+}
+
+/**
+ * rtc_timer_remove - Removes an rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed.
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of the timerqueue.
+ */
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+
+ if (next == &timer->node) {
+ struct rtc_wkalrm alarm;
+ int err;
+ next = timerqueue_getnext(&rtc->timerqueue);
+ if (!next)
+ return;
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ schedule_work(&rtc->irqwork);
+ }
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: the irqwork work_struct embedded in the rtc device
+ *
+ * Expires rtc timers. Reprograms the next alarm event if needed.
+ * Called via the rtc->irqwork work queue.
+ *
+ * Serializes access to the timerqueue via the ops_lock mutex.
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+ struct rtc_timer *timer;
+ struct timerqueue_node *next;
+ ktime_t now;
+ struct rtc_time tm;
+
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, irqwork);
+
+ mutex_lock(&rtc->ops_lock);
+again:
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+ while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+ if (next->expires.tv64 > now.tv64)
+ break;
+
+ /* expire timer */
+ timer = container_of(next, struct rtc_timer, node);
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+ timer->enabled = 0;
+ if (timer->task.func)
+ timer->task.func(timer->task.private_data);
+
+ /* Re-add/fwd periodic timers */
+ if (ktime_to_ns(timer->period)) {
+ timer->node.expires = ktime_add(timer->node.expires,
+ timer->period);
+ timer->enabled = 1;
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+ }
+ }
+
+ /* Set next alarm */
+ if (next) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ goto again;
+ }
+
+ mutex_unlock(&rtc->ops_lock);
+}
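
A worked timeline for the goto above (times illustrative): __rtc_set_alarm() returns -ETIME when the requested expiry is already in the past, so instead of waiting for a hardware alarm that will never fire, the loop re-reads the clock and expires that timer inline.

    /* now = 10:00:05, head of queue expires at 10:00:04
     *   -> __rtc_set_alarm() == -ETIME
     *   -> goto again: re-read the time, expire the 10:00:04 timer in
     *      the while loop, then program the next pending alarm (if any). */
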
+
+/**
+ * rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when the timer fires
+ * @data: private data passed to the function pointer
+ *
+ * Kernel interface to initialize an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
+{
+ timerqueue_init(&timer->node);
+ timer->enabled = 0;
+ timer->task.func = f;
+ timer->task.private_data = data;
+}
+
+/**
+ * rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer recurs (zero for a one-shot timer)
+ *
+ * Kernel interface to set an rtc_timer.
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+ ktime_t expires, ktime_t period)
+{
+ int ret = 0;
+ mutex_lock(&rtc->ops_lock);
+ if (timer->enabled)
+ rtc_timer_remove(rtc, timer);
+
+ timer->node.expires = expires;
+ timer->period = period;
+
+ timer->enabled = 1;
+ rtc_timer_enqueue(rtc, timer);
+
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
+
+/**
+ * rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface to cancel an rtc_timer.
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+ int ret = 0;
+ mutex_lock(&rtc->ops_lock);
+ if (timer->enabled)
+ rtc_timer_remove(rtc, timer);
+ timer->enabled = 0;
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
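
Putting the new API together, a hypothetical in-kernel user (my_timer and my_timer_fn are illustrative names, not part of this patch):

    static struct rtc_timer my_timer;

    static void my_timer_fn(void *private)
    {
            struct rtc_device *rtc = private;

            dev_info(&rtc->dev, "rtc timer fired\n");
    }

    /* with a valid rtc handle: */
    struct rtc_time tm;
    ktime_t now;

    rtc_timer_init(&my_timer, my_timer_fn, rtc);
    rtc_read_time(rtc, &tm);
    now = rtc_tm_to_ktime(tm);
    /* first expiry in 5 s, then every 2 s */
    rtc_timer_start(rtc, &my_timer, ktime_add(now, ktime_set(5, 0)),
                    ktime_set(2, 0));
    /* ... */
    rtc_timer_cancel(rtc, &my_timer);
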
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167a0c9..7e6ce626b7f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -687,7 +687,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
#if defined(CONFIG_ATARI)
address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
- || defined(__sparc__) || defined(__mips__)
+ || defined(__sparc__) || defined(__mips__) \
+ || defined(__powerpc__)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984d155..212b16edafc 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,105 +46,6 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
return err;
}
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-/*
- * Routine to poll RTC seconds field for change as often as possible,
- * after first RTC_UIE use timer to reduce polling
- */
-static void rtc_uie_task(struct work_struct *work)
-{
- struct rtc_device *rtc =
- container_of(work, struct rtc_device, uie_task);
- struct rtc_time tm;
- int num = 0;
- int err;
-
- err = rtc_read_time(rtc, &tm);
-
- spin_lock_irq(&rtc->irq_lock);
- if (rtc->stop_uie_polling || err) {
- rtc->uie_task_active = 0;
- } else if (rtc->oldsecs != tm.tm_sec) {
- num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
- rtc->oldsecs = tm.tm_sec;
- rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
- rtc->uie_timer_active = 1;
- rtc->uie_task_active = 0;
- add_timer(&rtc->uie_timer);
- } else if (schedule_work(&rtc->uie_task) == 0) {
- rtc->uie_task_active = 0;
- }
- spin_unlock_irq(&rtc->irq_lock);
- if (num)
- rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
-}
-static void rtc_uie_timer(unsigned long data)
-{
- struct rtc_device *rtc = (struct rtc_device *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc->irq_lock, flags);
- rtc->uie_timer_active = 0;
- rtc->uie_task_active = 1;
- if ((schedule_work(&rtc->uie_task) == 0))
- rtc->uie_task_active = 0;
- spin_unlock_irqrestore(&rtc->irq_lock, flags);
-}
-
-static int clear_uie(struct rtc_device *rtc)
-{
- spin_lock_irq(&rtc->irq_lock);
- if (rtc->uie_irq_active) {
- rtc->stop_uie_polling = 1;
- if (rtc->uie_timer_active) {
- spin_unlock_irq(&rtc->irq_lock);
- del_timer_sync(&rtc->uie_timer);
- spin_lock_irq(&rtc->irq_lock);
- rtc->uie_timer_active = 0;
- }
- if (rtc->uie_task_active) {
- spin_unlock_irq(&rtc->irq_lock);
- flush_work_sync(&rtc->uie_task);
- spin_lock_irq(&rtc->irq_lock);
- }
- rtc->uie_irq_active = 0;
- }
- spin_unlock_irq(&rtc->irq_lock);
- return 0;
-}
-
-static int set_uie(struct rtc_device *rtc)
-{
- struct rtc_time tm;
- int err;
-
- err = rtc_read_time(rtc, &tm);
- if (err)
- return err;
- spin_lock_irq(&rtc->irq_lock);
- if (!rtc->uie_irq_active) {
- rtc->uie_irq_active = 1;
- rtc->stop_uie_polling = 0;
- rtc->oldsecs = tm.tm_sec;
- rtc->uie_task_active = 1;
- if (schedule_work(&rtc->uie_task) == 0)
- rtc->uie_task_active = 0;
- }
- rtc->irq_data = 0;
- spin_unlock_irq(&rtc->irq_lock);
- return 0;
-}
-
-int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
-{
- if (enabled)
- return set_uie(rtc);
- else
- return clear_uie(rtc);
-}
-EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
-
-#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -493,11 +394,6 @@ void rtc_dev_prepare(struct rtc_device *rtc)
rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- INIT_WORK(&rtc->uie_task, rtc_uie_task);
- setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
-#endif
-
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index d827ce570a8..0d559b6416d 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -106,9 +106,9 @@ struct ds1307 {
struct i2c_client *client;
struct rtc_device *rtc;
struct work_struct work;
- s32 (*read_block_data)(struct i2c_client *client, u8 command,
+ s32 (*read_block_data)(const struct i2c_client *client, u8 command,
u8 length, u8 *values);
- s32 (*write_block_data)(struct i2c_client *client, u8 command,
+ s32 (*write_block_data)(const struct i2c_client *client, u8 command,
u8 length, const u8 *values);
};
@@ -158,8 +158,8 @@ MODULE_DEVICE_TABLE(i2c, ds1307_id);
#define BLOCK_DATA_MAX_TRIES 10
-static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
- u8 length, u8 *values)
+static s32 ds1307_read_block_data_once(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values)
{
s32 i, data;
@@ -172,7 +172,7 @@ static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
return i;
}
-static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
u8 length, u8 *values)
{
u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
@@ -198,7 +198,7 @@ static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
return length;
}
-static s32 ds1307_write_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
u8 currvalues[I2C_SMBUS_BLOCK_MAX];
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f338b..075f1708dea 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
}
EXPORT_SYMBOL(rtc_tm_to_time);
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+ time_t time;
+ rtc_tm_to_time(&tm, &time);
+ return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+ struct timespec ts;
+ struct rtc_time ret;
+
+ ts = ktime_to_timespec(kt);
+ /* Round up any ns */
+ if (ts.tv_nsec)
+ ts.tv_sec++;
+ rtc_time_to_tm(ts.tv_sec, &ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
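
A small sketch of the rounding guarantee (values illustrative): any sub-second remainder rounds the converted time up to the next second, so an alarm derived from a ktime is never programmed in the past.

    ktime_t kt = ktime_add(rtc_tm_to_ktime(tm), ktime_set(0, 1));
    struct rtc_time later = rtc_ktime_to_tm(kt);   /* tm + 1 ns reads back as tm + 1 s */
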
+
MODULE_LICENSE("GPL");
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 188aff6d263..c1df7676a73 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -776,24 +776,7 @@ config BFIN_UART0_CTSRTS
bool "Enable UART0 hardware flow control"
depends on SERIAL_BFIN_UART0
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART0_CTS_PIN
- int "UART0 CTS pin"
- depends on BFIN_UART0_CTSRTS && !BF548
- default 23
- help
- The default pin is GPIO_GP7.
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART0_RTS_PIN
- int "UART0 RTS pin"
- depends on BFIN_UART0_CTSRTS && !BF548
- default 22
- help
- The default pin is GPIO_GP6.
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART1
bool "Enable UART1"
@@ -805,22 +788,7 @@ config BFIN_UART1_CTSRTS
bool "Enable UART1 hardware flow control"
depends on SERIAL_BFIN_UART1
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART1_CTS_PIN
- int "UART1 CTS pin"
- depends on BFIN_UART1_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART1_RTS_PIN
- int "UART1 RTS pin"
- depends on BFIN_UART1_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART2
bool "Enable UART2"
@@ -832,22 +800,7 @@ config BFIN_UART2_CTSRTS
bool "Enable UART2 hardware flow control"
depends on SERIAL_BFIN_UART2
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART2_CTS_PIN
- int "UART2 CTS pin"
- depends on BFIN_UART2_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART2_RTS_PIN
- int "UART2 RTS pin"
- depends on BFIN_UART2_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART3
bool "Enable UART3"
@@ -859,22 +812,7 @@ config BFIN_UART3_CTSRTS
bool "Enable UART3 hardware flow control"
depends on SERIAL_BFIN_UART3
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART3_CTS_PIN
- int "UART3 CTS pin"
- depends on BFIN_UART3_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART3_RTS_PIN
- int "UART3 RTS pin"
- depends on BFIN_UART3_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_IMX
bool "IMX serial port support"
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 19cac9f610f..e381b895b04 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -1,7 +1,7 @@
/*
* Blackfin On-Chip Serial Driver
*
- * Copyright 2006-2008 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -12,6 +12,9 @@
#define SUPPORT_SYSRQ
#endif
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/gfp.h>
@@ -23,21 +26,20 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
-#include <linux/dma-mapping.h>
-
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+#include <linux/gpio.h>
+#include <linux/irq.h>
#include <linux/kgdb.h>
-#include <asm/irq_regs.h>
-#endif
-
-#include <asm/gpio.h>
-#include <mach/bfin_serial_5xx.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
+#include <asm/portmux.h>
#include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
#ifdef CONFIG_SERIAL_BFIN_MODULE
# undef CONFIG_EARLY_PRINTK
@@ -48,12 +50,11 @@
#endif
/* UART name and device definitions */
-#define BFIN_SERIAL_NAME "ttyBF"
+#define BFIN_SERIAL_DEV_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
#define BFIN_SERIAL_MINOR 64
-static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS];
-static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource);
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -743,14 +744,14 @@ static int bfin_serial_startup(struct uart_port *port)
}
}
if (uart->rts_pin >= 0) {
- gpio_request(uart->rts_pin, DRIVER_NAME);
gpio_direction_output(uart->rts_pin, 0);
}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (request_irq(uart->status_irq,
+ if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
bfin_serial_mctrl_cts_int,
IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
}
@@ -796,11 +797,9 @@ static void bfin_serial_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (UART_GET_IER(uart) & EDSSI)
+ if (uart->cts_pin >= 0)
free_irq(uart->status_irq, uart);
#endif
}
@@ -962,33 +961,33 @@ bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
*/
static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
{
- int line = port->line;
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
switch (ld) {
case N_IRDA:
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val |= (IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
break;
default:
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
}
}
static void bfin_serial_reset_irda(struct uart_port *port)
{
- int line = port->line;
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
SSYNC();
val |= (IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
SSYNC();
}
@@ -1070,85 +1069,6 @@ static struct uart_ops bfin_serial_pops = {
#endif
};
-static void __init bfin_serial_hw_init(void)
-{
-#ifdef CONFIG_SERIAL_BFIN_UART0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART1
- peripheral_request(P_UART1_TX, DRIVER_NAME);
- peripheral_request(P_UART1_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART1_CTSRTS) && defined(CONFIG_BF54x)
- peripheral_request(P_UART1_RTS, DRIVER_NAME);
- peripheral_request(P_UART1_CTS, DRIVER_NAME);
-# endif
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART2
- peripheral_request(P_UART2_TX, DRIVER_NAME);
- peripheral_request(P_UART2_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART3
- peripheral_request(P_UART3_TX, DRIVER_NAME);
- peripheral_request(P_UART3_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART3_CTSRTS) && defined(CONFIG_BF54x)
- peripheral_request(P_UART3_RTS, DRIVER_NAME);
- peripheral_request(P_UART3_CTS, DRIVER_NAME);
-# endif
-#endif
-}
-
-static void __init bfin_serial_init_ports(void)
-{
- static int first = 1;
- int i;
-
- if (!first)
- return;
- first = 0;
-
- bfin_serial_hw_init();
-
- for (i = 0; i < nr_active_ports; i++) {
- spin_lock_init(&bfin_serial_ports[i].port.lock);
- bfin_serial_ports[i].port.uartclk = get_sclk();
- bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
- bfin_serial_ports[i].port.ops = &bfin_serial_pops;
- bfin_serial_ports[i].port.line = i;
- bfin_serial_ports[i].port.iotype = UPIO_MEM;
- bfin_serial_ports[i].port.membase =
- (void __iomem *)bfin_serial_resource[i].uart_base_addr;
- bfin_serial_ports[i].port.mapbase =
- bfin_serial_resource[i].uart_base_addr;
- bfin_serial_ports[i].port.irq =
- bfin_serial_resource[i].uart_irq;
- bfin_serial_ports[i].status_irq =
- bfin_serial_resource[i].uart_status_irq;
- bfin_serial_ports[i].port.flags = UPF_BOOT_AUTOCONF;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- bfin_serial_ports[i].tx_done = 1;
- bfin_serial_ports[i].tx_count = 0;
- bfin_serial_ports[i].tx_dma_channel =
- bfin_serial_resource[i].uart_tx_dma_channel;
- bfin_serial_ports[i].rx_dma_channel =
- bfin_serial_resource[i].uart_rx_dma_channel;
- init_timer(&(bfin_serial_ports[i].rx_dma_timer));
-#endif
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
- bfin_serial_ports[i].cts_pin =
- bfin_serial_resource[i].uart_cts_pin;
- bfin_serial_ports[i].rts_pin =
- bfin_serial_resource[i].uart_rts_pin;
-#endif
- }
-}
-
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
/*
* If the port was already initialised (eg, by a boot loader),
@@ -1196,6 +1116,34 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
static struct uart_driver bfin_serial_reg;
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ while (!(UART_GET_LSR(uart) & THRE))
+ barrier();
+ UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+ defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE "bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+ uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
static int __init
bfin_serial_console_setup(struct console *co, char *options)
{
@@ -1215,9 +1163,12 @@ bfin_serial_console_setup(struct console *co, char *options)
* if so, search for the first available port that does have
* console support.
*/
- if (co->index == -1 || co->index >= nr_active_ports)
- co->index = 0;
- uart = &bfin_serial_ports[co->index];
+ if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+ return -ENODEV;
+
+ uart = bfin_serial_ports[co->index];
+ if (!uart)
+ return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1226,36 +1177,9 @@ bfin_serial_console_setup(struct console *co, char *options)
return uart_set_options(&uart->port, co, baud, parity, bits, flow);
}
-#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
- defined (CONFIG_EARLY_PRINTK) */
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
-static void bfin_serial_console_putchar(struct uart_port *port, int ch)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- while (!(UART_GET_LSR(uart) & THRE))
- barrier();
- UART_PUT_CHAR(uart, ch);
- SSYNC();
-}
-
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
- unsigned long flags;
-
- spin_lock_irqsave(&uart->port.lock, flags);
- uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
- spin_unlock_irqrestore(&uart->port.lock, flags);
-
-}
static struct console bfin_serial_console = {
- .name = BFIN_SERIAL_NAME,
+ .name = BFIN_SERIAL_DEV_NAME,
.write = bfin_serial_console_write,
.device = uart_console_device,
.setup = bfin_serial_console_setup,
@@ -1263,44 +1187,30 @@ static struct console bfin_serial_console = {
.index = -1,
.data = &bfin_serial_reg,
};
-
-static int __init bfin_serial_rs_console_init(void)
-{
- bfin_serial_init_ports();
- register_console(&bfin_serial_console);
-
- return 0;
-}
-console_initcall(bfin_serial_rs_console_init);
-
#define BFIN_SERIAL_CONSOLE &bfin_serial_console
#else
#define BFIN_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
+#ifdef CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
-#ifdef CONFIG_EARLY_PRINTK
-static __init void early_serial_putc(struct uart_port *port, int ch)
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
{
- unsigned timeout = 0xffff;
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- while ((!(UART_GET_LSR(uart) & THRE)) && --timeout)
- cpu_relax();
- UART_PUT_CHAR(uart, ch);
-}
+ unsigned long flags;
-static __init void early_serial_write(struct console *con, const char *s,
- unsigned int n)
-{
- struct bfin_serial_port *uart = &bfin_serial_ports[con->index];
- unsigned int i;
+ if (bfin_earlyprintk_port.port.line != co->index)
+ return;
- for (i = 0; i < n; i++, s++) {
- if (*s == '\n')
- early_serial_putc(&uart->port, '\r');
- early_serial_putc(&uart->port, *s);
- }
+ spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+ uart_console_write(&bfin_earlyprintk_port.port, s, count,
+ bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
}
/*
@@ -1311,113 +1221,204 @@ static __init void early_serial_write(struct console *con, const char *s,
*/
static struct __initdata console bfin_early_serial_console = {
.name = "early_BFuart",
- .write = early_serial_write,
+ .write = bfin_earlyprintk_console_write,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bfin_serial_reg,
};
-
-struct console __init *bfin_earlyserial_init(unsigned int port,
- unsigned int cflag)
-{
- struct bfin_serial_port *uart;
- struct ktermios t;
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
- /*
- * If we are using early serial, don't let the normal console rewind
- * log buffer, since that causes things to be printed multiple times
- */
- bfin_serial_console.flags &= ~CON_PRINTBUFFER;
#endif
- if (port == -1 || port >= nr_active_ports)
- port = 0;
- bfin_serial_init_ports();
- bfin_early_serial_console.index = port;
- uart = &bfin_serial_ports[port];
- t.c_cflag = cflag;
- t.c_iflag = 0;
- t.c_oflag = 0;
- t.c_lflag = ICANON;
- t.c_line = port;
- bfin_serial_set_termios(&uart->port, &t, &t);
- return &bfin_early_serial_console;
-}
-
-#endif /* CONFIG_EARLY_PRINTK */
-
static struct uart_driver bfin_serial_reg = {
.owner = THIS_MODULE,
- .driver_name = "bfin-uart",
- .dev_name = BFIN_SERIAL_NAME,
+ .driver_name = DRIVER_NAME,
+ .dev_name = BFIN_SERIAL_DEV_NAME,
.major = BFIN_SERIAL_MAJOR,
.minor = BFIN_SERIAL_MINOR,
.nr = BFIN_UART_NR_PORTS,
.cons = BFIN_SERIAL_CONSOLE,
};
-static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state)
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
- int i;
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_suspend_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
- }
+ return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
- return 0;
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&bfin_serial_reg, &uart->port);
}
-static int bfin_serial_resume(struct platform_device *dev)
+static int bfin_serial_probe(struct platform_device *pdev)
{
- int i;
+ struct resource *res;
+ struct bfin_serial_port *uart = NULL;
+ int ret = 0;
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_resume_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+ return -ENOENT;
}
- return 0;
-}
+ if (bfin_serial_ports[pdev->id] == NULL) {
-static int bfin_serial_probe(struct platform_device *dev)
-{
- struct resource *res = dev->resource;
- int i;
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ dev_err(&pdev->dev,
+ "fail to malloc bfin_serial_port\n");
+ return -ENOMEM;
+ }
+ bfin_serial_ports[pdev->id] = uart;
- for (i = 0; i < dev->num_resources; i++, res++)
- if (res->flags & IORESOURCE_MEM)
- break;
+#ifdef CONFIG_EARLY_PRINTK
+ if (!(bfin_earlyprintk_port.port.membase
+ && bfin_earlyprintk_port.port.line == pdev->id)) {
+ /*
+			 * If the peripheral pins of the current port were
+			 * already requested in the earlyprintk probe stage,
+			 * don't request them again.
+ */
+#endif
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ goto out_error_free_mem;
+ }
+#ifdef CONFIG_EARLY_PRINTK
+ }
+#endif
+
+ spin_lock_init(&uart->port.lock);
+ uart->port.uartclk = get_sclk();
+ uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ uart->port.ops = &bfin_serial_pops;
+ uart->port.line = pdev->id;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+		uart->port.membase = ioremap(res->start, resource_size(res));
+ if (!uart->port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ uart->port.mapbase = res->start;
- if (i < dev->num_resources) {
- for (i = 0; i < nr_active_ports; i++, res++) {
- if (bfin_serial_ports[i].port.mapbase != res->start)
- continue;
- bfin_serial_ports[i].port.dev = &dev->dev;
- uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
+ uart->port.irq = platform_get_irq(pdev, 0);
+ if (uart->port.irq < 0) {
+ dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
}
+
+ uart->status_irq = platform_get_irq(pdev, 1);
+ if (uart->status_irq < 0) {
+ dev_err(&pdev->dev, "No uart status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ uart->tx_done = 1;
+ uart->tx_count = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->tx_dma_channel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->rx_dma_channel = res->start;
+
+ init_timer(&(uart->rx_dma_timer));
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ uart->cts_pin = -1;
+ else
+ uart->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ uart->rts_pin = -1;
+ else
+ uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+ if (uart->rts_pin >= 0)
+ gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
}
- return 0;
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ uart = bfin_serial_ports[pdev->id];
+ uart->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, uart);
+ ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ }
+#endif
+
+ if (!ret)
+ return 0;
+
+ if (uart) {
+out_error_unmap:
+ iounmap(uart->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return ret;
}
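
For reference, a hypothetical board-file sketch laying resources out in the order the probe above consumes them (the MMIO range, IRQ, DMA and pin macros are illustrative; real boards use their part's values, and CTS/RTS GPIOs would be added as IORESOURCE_IO entries 0 and 1):

    static struct resource bfin_uart0_resources[] = {
            { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM },
            { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ },       /* RX/TX */
            { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ }, /* status */
            { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA },
            { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA },
    };

    static unsigned short bfin_uart0_peripherals[] = {
            P_UART0_TX, P_UART0_RX, 0   /* zero-terminated, passed as platform_data */
    };

    static struct platform_device bfin_uart0_device = {
            .name          = "bfin-uart",
            .id            = 0,
            .num_resources = ARRAY_SIZE(bfin_uart0_resources),
            .resource      = bfin_uart0_resources,
            .dev           = { .platform_data = bfin_uart0_peripherals },
    };
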
-static int bfin_serial_remove(struct platform_device *dev)
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
{
- int i;
-
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
- bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- gpio_free(bfin_serial_ports[i].cts_pin);
- gpio_free(bfin_serial_ports[i].rts_pin);
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (uart) {
+ uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
#endif
+ iounmap(uart->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
}
return 0;
@@ -1425,31 +1426,160 @@ static int bfin_serial_remove(struct platform_device *dev)
static struct platform_driver bfin_serial_driver = {
.probe = bfin_serial_probe,
- .remove = bfin_serial_remove,
+ .remove = __devexit_p(bfin_serial_remove),
.suspend = bfin_serial_suspend,
.resume = bfin_serial_resume,
.driver = {
- .name = "bfin-uart",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
-static int __init bfin_serial_init(void)
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+ .class_str = CLASS_BFIN_CONSOLE,
+ .pdrv = &bfin_serial_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
+ early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+ early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+ register_console(&bfin_serial_console);
+
+ return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during the earlyprintk init stage.
+ * So do an individual probe for earlyprintk with a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
{
+ struct resource *res;
int ret;
- pr_info("Serial: Blackfin serial driver\n");
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+ return -ENOENT;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+	bfin_earlyprintk_port.port.membase = ioremap(res->start,
+						resource_size(res));
+ if (!bfin_earlyprintk_port.port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ bfin_earlyprintk_port.port.mapbase = res->start;
+ bfin_earlyprintk_port.port.line = pdev->id;
+ bfin_earlyprintk_port.port.uartclk = get_sclk();
+ bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+ return 0;
+
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+
+ return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+ .probe = bfin_earlyprintk_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+ .class_str = CLASS_BFIN_EARLYPRINTK,
+ .pdrv = &bfin_earlyprintk_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+struct console __init *bfin_earlyserial_init(unsigned int port,
+ unsigned int cflag)
+{
+ struct ktermios t;
+ char port_name[20];
- bfin_serial_init_ports();
+	if (port >= BFIN_UART_NR_PORTS)
+ return NULL;
+
+ /*
+	 * Only probe the resources of the port named in the earlyprintk
+	 * boot argument; the expected port id is encoded in the port
+	 * name string.
+ */
+ snprintf(port_name, 20, DRIVER_NAME ".%d", port);
+ early_platform_driver_register(&early_bfin_earlyprintk_driver,
+ port_name);
+ early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+ if (!bfin_earlyprintk_port.port.membase)
+ return NULL;
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ /*
+ * If we are using early serial, don't let the normal console rewind
+ * log buffer, since that causes things to be printed multiple times
+ */
+ bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
+ bfin_early_serial_console.index = port;
+ t.c_cflag = cflag;
+ t.c_iflag = 0;
+ t.c_oflag = 0;
+ t.c_lflag = ICANON;
+ t.c_line = port;
+ bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
+ return &bfin_early_serial_console;
+}
+#endif /* CONFIG_EARLY_PRINTK */
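
Usage note: the early console is then selected on the kernel command line, e.g. (values illustrative):

    earlyprintk=serial,uart0,57600
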
+
+static int __init bfin_serial_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin serial driver\n");
ret = uart_register_driver(&bfin_serial_reg);
- if (ret == 0) {
- ret = platform_driver_register(&bfin_serial_driver);
- if (ret) {
- pr_debug("uart register failed\n");
- uart_unregister_driver(&bfin_serial_reg);
- }
+	if (ret) {
+		pr_err("failed to register %s: %d\n",
+			bfin_serial_reg.driver_name, ret);
+		return ret;
+	}
+
+ ret = platform_driver_register(&bfin_serial_driver);
+ if (ret) {
+ pr_err("fail to register bfin uart\n");
+ uart_unregister_driver(&bfin_serial_reg);
}
+
return ret;
}
@@ -1463,7 +1593,7 @@ static void __exit bfin_serial_exit(void)
module_init(bfin_serial_init);
module_exit(bfin_serial_exit);
-MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>");
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
MODULE_DESCRIPTION("Blackfin generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5bf6e2..24b966d5061 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
unsigned long rom_pl;
static int die_nmi_called;
- if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+ if (ulReason != DIE_NMIUNKNOWN)
goto out;
if (!hpwdt_nmi_decoding)
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 7e051147679..814ac4e213a 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -9,6 +9,8 @@ config 9P_FS
If unsure, say N.
+if 9P_FS
+
config 9P_FSCACHE
bool "Enable 9P client caching support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -20,7 +22,6 @@ config 9P_FSCACHE
config 9P_FS_POSIX_ACL
bool "9P POSIX Access Control Lists"
- depends on 9P_FS
select FS_POSIX_ACL
help
POSIX Access Control Lists (ACLs) support permissions for users and
@@ -30,3 +31,5 @@ config 9P_FS_POSIX_ACL
Linux website <http://acl.bestbits.at/>.
If you don't know what Access Control Lists are, say N
+
+endif
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index f8ba37effd1..ab8c1278063 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_9P_FS) := 9p.o
9p-objs := \
vfs_super.o \
vfs_inode.o \
+ vfs_inode_dotl.o \
vfs_addr.o \
vfs_file.o \
vfs_dir.o \
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6e58c4ca1e6..02a2cf61631 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -28,7 +28,7 @@ static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
{
ssize_t size;
void *value = NULL;
- struct posix_acl *acl = NULL;;
+ struct posix_acl *acl = NULL;
size = v9fs_fid_xattr_get(fid, name, NULL, 0);
if (size > 0) {
@@ -365,7 +365,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
case ACL_TYPE_DEFAULT:
name = POSIX_ACL_XATTR_DEFAULT;
if (!S_ISDIR(inode->i_mode)) {
- retval = -EINVAL;
+ retval = acl ? -EINVAL : 0;
goto err_out;
}
break;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index cb6396855e2..c4b5d8864f0 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -113,9 +113,27 @@ struct v9fs_session_info {
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
char *);
-void v9fs_session_close(struct v9fs_session_info *v9ses);
-void v9fs_session_cancel(struct v9fs_session_info *v9ses);
-void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_close(struct v9fs_session_info *v9ses);
+extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nameidata);
+extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *p);
+extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
+ struct p9_fid *fid,
+ struct super_block *sb);
+
+extern const struct inode_operations v9fs_dir_inode_operations_dotl;
+extern const struct inode_operations v9fs_file_inode_operations_dotl;
+extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
+ struct p9_fid *fid,
+ struct super_block *sb);
/* other default globals */
#define V9FS_PORT 564
@@ -138,3 +156,21 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
{
return v9ses->flags & V9FS_PROTO_2000L;
}
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing an attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
+{
+ if (v9fs_proto_dotl(v9ses))
+ return v9fs_inode_dotl(v9ses, fid, sb);
+ else
+ return v9fs_inode(v9ses, fid, sb);
+}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 59782981b22..5076eeb9550 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -49,15 +49,8 @@
static const struct inode_operations v9fs_dir_inode_operations;
static const struct inode_operations v9fs_dir_inode_operations_dotu;
-static const struct inode_operations v9fs_dir_inode_operations_dotl;
static const struct inode_operations v9fs_file_inode_operations;
-static const struct inode_operations v9fs_file_inode_operations_dotl;
static const struct inode_operations v9fs_symlink_inode_operations;
-static const struct inode_operations v9fs_symlink_inode_operations_dotl;
-
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
- dev_t rdev);
/**
* unixmode2p9mode - convert unix mode bits to plan 9
@@ -251,41 +244,6 @@ void v9fs_destroy_inode(struct inode *inode)
#endif
/**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
- * new file system object. This checks the S_ISGID to determine the owning
- * group of the new file system object.
- */
-
-static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
-{
- BUG_ON(dir_inode == NULL);
-
- if (dir_inode->i_mode & S_ISGID) {
- /* set_gid bit is set.*/
- return dir_inode->i_gid;
- }
- return current_fsgid();
-}
-
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
- struct dentry *dentry;
-
- spin_lock(&inode->i_lock);
- /* Directory should have only one entry. */
- BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
- dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
- spin_unlock(&inode->i_lock);
- return dentry;
-}
-
-/**
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
* @mode: mode to setup inode with
@@ -454,7 +412,7 @@ void v9fs_evict_inode(struct inode *inode)
#endif
}
-static struct inode *
+struct inode *
v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
{
@@ -489,60 +447,6 @@ error:
return ERR_PTR(err);
}
-static struct inode *
-v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
-{
- struct inode *ret = NULL;
- int err;
- struct p9_stat_dotl *st;
-
- st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
- if (IS_ERR(st))
- return ERR_CAST(st);
-
- ret = v9fs_get_inode(sb, st->st_mode);
- if (IS_ERR(ret)) {
- err = PTR_ERR(ret);
- goto error;
- }
-
- v9fs_stat2inode_dotl(st, ret);
- ret->i_ino = v9fs_qid2ino(&st->qid);
-#ifdef CONFIG_9P_FSCACHE
- v9fs_vcookie_set_qid(ret, &st->qid);
- v9fs_cache_inode_get_cookie(ret);
-#endif
- err = v9fs_get_acl(ret, fid);
- if (err) {
- iput(ret);
- goto error;
- }
- kfree(st);
- return ret;
-error:
- kfree(st);
- return ERR_PTR(err);
-}
-
-/**
- * v9fs_inode_from_fid - Helper routine to populate an inode by
- * issuing a attribute request
- * @v9ses: session information
- * @fid: fid to issue attribute request for
- * @sb: superblock on which to create inode
- *
- */
-static inline struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
-{
- if (v9fs_proto_dotl(v9ses))
- return v9fs_inode_dotl(v9ses, fid, sb);
- else
- return v9fs_inode(v9ses, fid, sb);
-}
-
/**
* v9fs_remove - helper function to remove files and directories
* @dir: directory inode that is being deleted
@@ -633,12 +537,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
-
- if (v9ses->cache)
- d_set_d_op(dentry, &v9fs_cached_dentry_operations);
- else
- d_set_d_op(dentry, &v9fs_dentry_operations);
-
d_instantiate(dentry, inode);
err = v9fs_fid_add(dentry, fid);
if (err < 0)
@@ -657,144 +555,6 @@ error:
}
/**
- * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
- * @dir: directory inode that is being created
- * @dentry: dentry that is being deleted
- * @mode: create permissions
- * @nd: path information
- *
- */
-
-static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
- struct nameidata *nd)
-{
- int err = 0;
- char *name = NULL;
- gid_t gid;
- int flags;
- mode_t mode;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid = NULL;
- struct p9_fid *dfid, *ofid;
- struct file *filp;
- struct p9_qid qid;
- struct inode *inode;
- struct posix_acl *pacl = NULL, *dacl = NULL;
-
- v9ses = v9fs_inode2v9ses(dir);
- if (nd && nd->flags & LOOKUP_OPEN)
- flags = nd->intent.open.flags - 1;
- else {
- /*
- * create call without LOOKUP_OPEN is due
- * to mknod of regular files. So use mknod
- * operation.
- */
- return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
- }
-
- name = (char *) dentry->d_name.name;
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
- "mode:0x%x\n", name, flags, omode);
-
- dfid = v9fs_fid_lookup(dentry->d_parent);
- if (IS_ERR(dfid)) {
- err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- return err;
- }
-
- /* clone a fid to use for creation */
- ofid = p9_client_walk(dfid, 0, NULL, 1);
- if (IS_ERR(ofid)) {
- err = PTR_ERR(ofid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- return err;
- }
-
- gid = v9fs_get_fsgid_for_create(dir);
-
- mode = omode;
- /* Update mode based on ACL value */
- err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
- if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in creat %d\n", err);
- goto error;
- }
- err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
- if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "p9_client_open_dotl failed in creat %d\n",
- err);
- goto error;
- }
- /* instantiate inode and assign the unopened fid to the dentry */
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
- (nd && nd->flags & LOOKUP_OPEN)) {
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_cached_dentry_operations);
- d_instantiate(dentry, inode);
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
- /* The fid would get clunked via a dput */
- fid = NULL;
- } else {
- /*
- * Not in cached mode. No need to populate
- * inode with stat. We need to get an inode
- * so that we can set the acl with dentry
- */
- inode = v9fs_get_inode(dir->i_sb, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_dentry_operations);
- d_instantiate(dentry, inode);
- }
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, dacl, pacl);
-
- /* if we are opening a file, assign the open fid to the file */
- if (nd && nd->flags & LOOKUP_OPEN) {
- filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
- if (IS_ERR(filp)) {
- p9_client_clunk(ofid);
- return PTR_ERR(filp);
- }
- filp->private_data = ofid;
- } else
- p9_client_clunk(ofid);
-
- return 0;
-
-error:
- if (ofid)
- p9_client_clunk(ofid);
- if (fid)
- p9_client_clunk(fid);
- return err;
-}
-
-/**
* v9fs_vfs_create - VFS hook to create files
* @dir: directory inode that is being created
* @dentry: dentry that is being deleted
@@ -884,107 +644,6 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return err;
}
-
-/**
- * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
- * @dir: inode that is being unlinked
- * @dentry: dentry that is being unlinked
- * @mode: mode for new directory
- *
- */
-
-static int v9fs_vfs_mkdir_dotl(struct inode *dir,
- struct dentry *dentry, int omode)
-{
- int err;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid = NULL, *dfid = NULL;
- gid_t gid;
- char *name;
- mode_t mode;
- struct inode *inode;
- struct p9_qid qid;
- struct dentry *dir_dentry;
- struct posix_acl *dacl = NULL, *pacl = NULL;
-
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
- err = 0;
- v9ses = v9fs_inode2v9ses(dir);
-
- omode |= S_IFDIR;
- if (dir->i_mode & S_ISGID)
- omode |= S_ISGID;
-
- dir_dentry = v9fs_dentry_from_dir_inode(dir);
- dfid = v9fs_fid_lookup(dir_dentry);
- if (IS_ERR(dfid)) {
- err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
- goto error;
- }
-
- gid = v9fs_get_fsgid_for_create(dir);
- mode = omode;
- /* Update mode based on ACL value */
- err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
- if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in mkdir %d\n", err);
- goto error;
- }
- name = (char *) dentry->d_name.name;
- err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
- if (err < 0)
- goto error;
-
- /* instantiate inode and assign the unopened fid to the dentry */
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_cached_dentry_operations);
- d_instantiate(dentry, inode);
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
- fid = NULL;
- } else {
- /*
- * Not in cached mode. No need to populate
- * inode with stat. We need to get an inode
- * so that we can set the acl with dentry
- */
- inode = v9fs_get_inode(dir->i_sb, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_dentry_operations);
- d_instantiate(dentry, inode);
- }
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, dacl, pacl);
-
-error:
- if (fid)
- p9_client_clunk(fid);
- return err;
-}
-
/**
* v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
* @dir: inode that is being walked from
@@ -993,7 +652,7 @@ error:
*
*/
-static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nameidata)
{
struct super_block *sb;
@@ -1063,7 +722,7 @@ error:
*
*/
-static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
+int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
{
return v9fs_remove(i, d, 0);
}
@@ -1075,7 +734,7 @@ static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
*
*/
-static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
+int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
{
return v9fs_remove(i, d, 1);
}
@@ -1089,7 +748,7 @@ static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
*
*/
-static int
+int
v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -1196,42 +855,6 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-static int
-v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
-{
- int err;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid;
- struct p9_stat_dotl *st;
-
- P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
- err = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- return simple_getattr(mnt, dentry, stat);
-
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
-
- /* Ask for all the fields in stat structure. Server will return
- * whatever it supports
- */
-
- st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
- if (IS_ERR(st))
- return PTR_ERR(st);
-
- v9fs_stat2inode_dotl(st, dentry->d_inode);
- generic_fillattr(dentry->d_inode, stat);
- /* Change block size to what the server returned */
- stat->blksize = st->st_blksize;
-
- kfree(st);
- return 0;
-}
-
/**
* v9fs_vfs_setattr - set file metadata
* @dentry: file whose metadata to set
@@ -1291,64 +914,6 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
}
/**
- * v9fs_vfs_setattr_dotl - set file metadata
- * @dentry: file whose metadata to set
- * @iattr: metadata assignment structure
- *
- */
-
-int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
-{
- int retval;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid;
- struct p9_iattr_dotl p9attr;
-
- P9_DPRINTK(P9_DEBUG_VFS, "\n");
-
- retval = inode_change_ok(dentry->d_inode, iattr);
- if (retval)
- return retval;
-
- p9attr.valid = iattr->ia_valid;
- p9attr.mode = iattr->ia_mode;
- p9attr.uid = iattr->ia_uid;
- p9attr.gid = iattr->ia_gid;
- p9attr.size = iattr->ia_size;
- p9attr.atime_sec = iattr->ia_atime.tv_sec;
- p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
- p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
- p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
-
- retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
-
- retval = p9_client_setattr(fid, &p9attr);
- if (retval < 0)
- return retval;
-
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode)) {
- retval = vmtruncate(dentry->d_inode, iattr->ia_size);
- if (retval)
- return retval;
- }
-
- setattr_copy(dentry->d_inode, iattr);
- mark_inode_dirty(dentry->d_inode);
- if (iattr->ia_valid & ATTR_MODE) {
- /* We also want to update ACL when we update mode bits */
- retval = v9fs_acl_chmod(dentry);
- if (retval < 0)
- return retval;
- }
- return 0;
-}
-
-/**
* v9fs_stat2inode - populate an inode structure with mistat info
* @stat: Plan 9 metadata (mistat) structure
* @inode: inode to populate
@@ -1426,77 +991,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
}
/**
- * v9fs_stat2inode_dotl - populate an inode structure with stat info
- * @stat: stat structure
- * @inode: inode to populate
- * @sb: superblock of filesystem
- *
- */
-
-void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
-{
-
- if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
- inode->i_uid = stat->st_uid;
- inode->i_gid = stat->st_gid;
- inode->i_nlink = stat->st_nlink;
- inode->i_mode = stat->st_mode;
- inode->i_rdev = new_decode_dev(stat->st_rdev);
-
- if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
-
- i_size_write(inode, stat->st_size);
- inode->i_blocks = stat->st_blocks;
- } else {
- if (stat->st_result_mask & P9_STATS_ATIME) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
- }
- if (stat->st_result_mask & P9_STATS_MTIME) {
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
- }
- if (stat->st_result_mask & P9_STATS_CTIME) {
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
- }
- if (stat->st_result_mask & P9_STATS_UID)
- inode->i_uid = stat->st_uid;
- if (stat->st_result_mask & P9_STATS_GID)
- inode->i_gid = stat->st_gid;
- if (stat->st_result_mask & P9_STATS_NLINK)
- inode->i_nlink = stat->st_nlink;
- if (stat->st_result_mask & P9_STATS_MODE) {
- inode->i_mode = stat->st_mode;
- if ((S_ISBLK(inode->i_mode)) ||
- (S_ISCHR(inode->i_mode)))
- init_special_inode(inode, inode->i_mode,
- inode->i_rdev);
- }
- if (stat->st_result_mask & P9_STATS_RDEV)
- inode->i_rdev = new_decode_dev(stat->st_rdev);
- if (stat->st_result_mask & P9_STATS_SIZE)
- i_size_write(inode, stat->st_size);
- if (stat->st_result_mask & P9_STATS_BLOCKS)
- inode->i_blocks = stat->st_blocks;
- }
- if (stat->st_result_mask & P9_STATS_GEN)
- inode->i_generation = stat->st_gen;
-
- /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
- * because the inode structure does not have fields for them.
- */
-}
-
-/**
* v9fs_qid2ino - convert qid into inode number
* @qid: qid to hash
*
@@ -1602,7 +1096,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
*
*/
-static void
+void
v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
@@ -1646,94 +1140,6 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
}
/**
- * v9fs_vfs_symlink_dotl - helper function to create symlinks
- * @dir: directory inode containing symlink
- * @dentry: dentry for symlink
- * @symname: symlink data
- *
- * See Also: 9P2000.L RFC for more information
- *
- */
-
-static int
-v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
- const char *symname)
-{
- struct v9fs_session_info *v9ses;
- struct p9_fid *dfid;
- struct p9_fid *fid = NULL;
- struct inode *inode;
- struct p9_qid qid;
- char *name;
- int err;
- gid_t gid;
-
- name = (char *) dentry->d_name.name;
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
- dir->i_ino, name, symname);
- v9ses = v9fs_inode2v9ses(dir);
-
- dfid = v9fs_fid_lookup(dentry->d_parent);
- if (IS_ERR(dfid)) {
- err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- return err;
- }
-
- gid = v9fs_get_fsgid_for_create(dir);
-
- /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
- err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
-
- if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
- goto error;
- }
-
- if (v9ses->cache) {
- /* Now walk from the parent so we can get an unopened fid. */
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
- /* instantiate inode and assign the unopened fid to dentry */
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_cached_dentry_operations);
- d_instantiate(dentry, inode);
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
- fid = NULL;
- } else {
- /* Not in cached mode. No need to populate inode with stat */
- inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_dentry_operations);
- d_instantiate(dentry, inode);
- }
-
-error:
- if (fid)
- p9_client_clunk(fid);
-
- return err;
-}
-
-/**
* v9fs_vfs_symlink - helper function to create symlinks
* @dir: directory inode containing symlink
* @dentry: dentry for symlink
@@ -1792,77 +1198,6 @@ clunk_fid:
}
/**
- * v9fs_vfs_link_dotl - create a hardlink for dotl
- * @old_dentry: dentry for file to link to
- * @dir: inode destination for new link
- * @dentry: dentry for link
- *
- */
-
-static int
-v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
- struct dentry *dentry)
-{
- int err;
- struct p9_fid *dfid, *oldfid;
- char *name;
- struct v9fs_session_info *v9ses;
- struct dentry *dir_dentry;
-
- P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
- dir->i_ino, old_dentry->d_name.name,
- dentry->d_name.name);
-
- v9ses = v9fs_inode2v9ses(dir);
- dir_dentry = v9fs_dentry_from_dir_inode(dir);
- dfid = v9fs_fid_lookup(dir_dentry);
- if (IS_ERR(dfid))
- return PTR_ERR(dfid);
-
- oldfid = v9fs_fid_lookup(old_dentry);
- if (IS_ERR(oldfid))
- return PTR_ERR(oldfid);
-
- name = (char *) dentry->d_name.name;
-
- err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
-
- if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
- return err;
- }
-
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- /* Get the latest stat info from server. */
- struct p9_fid *fid;
- struct p9_stat_dotl *st;
-
- fid = v9fs_fid_lookup(old_dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
-
- st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
- if (IS_ERR(st))
- return PTR_ERR(st);
-
- v9fs_stat2inode_dotl(st, old_dentry->d_inode);
-
- kfree(st);
- } else {
- /* Caching disabled. No need to get upto date stat info.
- * This dentry will be released immediately. So, just hold the
- * inode
- */
- ihold(old_dentry->d_inode);
- }
-
- d_set_d_op(dentry, old_dentry->d_op);
- d_instantiate(dentry, old_dentry->d_inode);
-
- return err;
-}
-
-/**
* v9fs_vfs_mknod - create a special file
* @dir: inode destination for new link
* @dentry: dentry for file
@@ -1907,160 +1242,6 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return retval;
}
-/**
- * v9fs_vfs_mknod_dotl - create a special file
- * @dir: inode destination for new link
- * @dentry: dentry for file
- * @mode: mode for creation
- * @rdev: device associated with special file
- *
- */
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
- dev_t rdev)
-{
- int err;
- char *name;
- mode_t mode;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid = NULL, *dfid = NULL;
- struct inode *inode;
- gid_t gid;
- struct p9_qid qid;
- struct dentry *dir_dentry;
- struct posix_acl *dacl = NULL, *pacl = NULL;
-
- P9_DPRINTK(P9_DEBUG_VFS,
- " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
- dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
-
- if (!new_valid_dev(rdev))
- return -EINVAL;
-
- v9ses = v9fs_inode2v9ses(dir);
- dir_dentry = v9fs_dentry_from_dir_inode(dir);
- dfid = v9fs_fid_lookup(dir_dentry);
- if (IS_ERR(dfid)) {
- err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
- goto error;
- }
-
- gid = v9fs_get_fsgid_for_create(dir);
- mode = omode;
- /* Update mode based on ACL value */
- err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
- if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in mknod %d\n", err);
- goto error;
- }
- name = (char *) dentry->d_name.name;
-
- err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
- if (err < 0)
- goto error;
-
- /* instantiate inode and assign the unopened fid to the dentry */
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
- fid = NULL;
- goto error;
- }
-
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_cached_dentry_operations);
- d_instantiate(dentry, inode);
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
- fid = NULL;
- } else {
- /*
- * Not in cached mode. No need to populate inode with stat.
- * socket syscall returns a fd, so we need instantiate
- */
- inode = v9fs_get_inode(dir->i_sb, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
- d_set_d_op(dentry, &v9fs_dentry_operations);
- d_instantiate(dentry, inode);
- }
- /* Now set the ACL based on the default value */
- v9fs_set_create_acl(dentry, dacl, pacl);
-error:
- if (fid)
- p9_client_clunk(fid);
- return err;
-}
-
-static int
-v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
-{
- int retval;
- struct p9_fid *fid;
- char *target = NULL;
-
- P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
- retval = -EPERM;
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
-
- retval = p9_client_readlink(fid, &target);
- if (retval < 0)
- return retval;
-
- strncpy(buffer, target, buflen);
- P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
-
- retval = strnlen(buffer, buflen);
- return retval;
-}
-
-/**
- * v9fs_vfs_follow_link_dotl - follow a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- *
- */
-
-static void *
-v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
-{
- int len = 0;
- char *link = __getname();
-
- P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
-
- if (!link)
- link = ERR_PTR(-ENOMEM);
- else {
- len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
- if (len < 0) {
- __putname(link);
- link = ERR_PTR(len);
- } else
- link[min(len, PATH_MAX-1)] = 0;
- }
- nd_set_link(nd, link);
-
- return NULL;
-}
-
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
@@ -2075,25 +1256,6 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.setattr = v9fs_vfs_setattr,
};
-static const struct inode_operations v9fs_dir_inode_operations_dotl = {
- .create = v9fs_vfs_create_dotl,
- .lookup = v9fs_vfs_lookup,
- .link = v9fs_vfs_link_dotl,
- .symlink = v9fs_vfs_symlink_dotl,
- .unlink = v9fs_vfs_unlink,
- .mkdir = v9fs_vfs_mkdir_dotl,
- .rmdir = v9fs_vfs_rmdir,
- .mknod = v9fs_vfs_mknod_dotl,
- .rename = v9fs_vfs_rename,
- .getattr = v9fs_vfs_getattr_dotl,
- .setattr = v9fs_vfs_setattr_dotl,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .removexattr = generic_removexattr,
- .listxattr = v9fs_listxattr,
- .check_acl = v9fs_check_acl,
-};
-
static const struct inode_operations v9fs_dir_inode_operations = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
@@ -2111,16 +1273,6 @@ static const struct inode_operations v9fs_file_inode_operations = {
.setattr = v9fs_vfs_setattr,
};
-static const struct inode_operations v9fs_file_inode_operations_dotl = {
- .getattr = v9fs_vfs_getattr_dotl,
- .setattr = v9fs_vfs_setattr_dotl,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .removexattr = generic_removexattr,
- .listxattr = v9fs_listxattr,
- .check_acl = v9fs_check_acl,
-};
-
static const struct inode_operations v9fs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link,
@@ -2129,14 +1281,3 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
.setattr = v9fs_vfs_setattr,
};
-static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
- .readlink = v9fs_vfs_readlink_dotl,
- .follow_link = v9fs_vfs_follow_link_dotl,
- .put_link = v9fs_vfs_put_link,
- .getattr = v9fs_vfs_getattr_dotl,
- .setattr = v9fs_vfs_setattr_dotl,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .removexattr = generic_removexattr,
- .listxattr = v9fs_listxattr,
-};
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
new file mode 100644
index 00000000000..fe3ffa9aace
--- /dev/null
+++ b/fs/9p/vfs_inode_dotl.c
@@ -0,0 +1,824 @@
+/*
+ * linux/fs/9p/vfs_inode_dotl.c
+ *
+ * This file contains vfs inode ops for the 9P2000.L protocol.
+ *
+ * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/inet.h>
+#include <linux/namei.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+#include "cache.h"
+#include "xattr.h"
+#include "acl.h"
+
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+ dev_t rdev);
+
+/**
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * new file system object. This checks the S_ISGID bit on the parent
+ * directory to determine the owning group of the new file system object.
+ */
+
+static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+{
+ BUG_ON(dir_inode == NULL);
+
+ if (dir_inode->i_mode & S_ISGID) {
+ /* set_gid bit is set.*/
+ return dir_inode->i_gid;
+ }
+ return current_fsgid();
+}
+
+/**
+ * v9fs_dentry_from_dir_inode - helper function to get the dentry from
+ * dir inode.
+ *
+ */
+
+static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
+{
+ struct dentry *dentry;
+
+ spin_lock(&inode->i_lock);
+ /* Directory should have only one entry. */
+ BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
+ dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+ spin_unlock(&inode->i_lock);
+ return dentry;
+}
+
+struct inode *
+v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
+{
+ struct inode *ret = NULL;
+ int err;
+ struct p9_stat_dotl *st;
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+ if (IS_ERR(st))
+ return ERR_CAST(st);
+
+ ret = v9fs_get_inode(sb, st->st_mode);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto error;
+ }
+
+ v9fs_stat2inode_dotl(st, ret);
+ ret->i_ino = v9fs_qid2ino(&st->qid);
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_vcookie_set_qid(ret, &st->qid);
+ v9fs_cache_inode_get_cookie(ret);
+#endif
+ err = v9fs_get_acl(ret, fid);
+ if (err) {
+ iput(ret);
+ goto error;
+ }
+ kfree(st);
+ return ret;
+error:
+ kfree(st);
+ return ERR_PTR(err);
+}
+
+/**
+ * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @dir: directory inode in which the file is being created
+ * @dentry: dentry of the file that is being created
+ * @mode: create permissions
+ * @nd: path information
+ *
+ */
+
+static int
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+ struct nameidata *nd)
+{
+ int err = 0;
+ char *name = NULL;
+ gid_t gid;
+ int flags;
+ mode_t mode;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL;
+ struct p9_fid *dfid, *ofid;
+ struct file *filp;
+ struct p9_qid qid;
+ struct inode *inode;
+ struct posix_acl *pacl = NULL, *dacl = NULL;
+
+ v9ses = v9fs_inode2v9ses(dir);
+ if (nd && nd->flags & LOOKUP_OPEN)
+ flags = nd->intent.open.flags - 1;
+ else {
+ /*
+ * A create call without LOOKUP_OPEN comes
+ * from mknod of regular files, so use the
+ * mknod operation.
+ */
+ return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+ }
+
+ name = (char *) dentry->d_name.name;
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
+ "mode:0x%x\n", name, flags, omode);
+
+ dfid = v9fs_fid_lookup(dentry->d_parent);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ return err;
+ }
+
+ /* clone a fid to use for creation */
+ ofid = p9_client_walk(dfid, 0, NULL, 1);
+ if (IS_ERR(ofid)) {
+ err = PTR_ERR(ofid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ return err;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in creat %d\n", err);
+ goto error;
+ }
+ err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "p9_client_open_dotl failed in creat %d\n",
+ err);
+ goto error;
+ }
+
+ /* instantiate inode and assign the unopened fid to the dentry */
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ fid = NULL;
+ goto error;
+ }
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
+
+ /* Since we are opening a file, assign the open fid to the file */
+ filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
+ if (IS_ERR(filp)) {
+ p9_client_clunk(ofid);
+ return PTR_ERR(filp);
+ }
+ filp->private_data = ofid;
+ return 0;
+
+error:
+ if (ofid)
+ p9_client_clunk(ofid);
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
+/**
+ * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the directory that is being created
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir_dotl(struct inode *dir,
+ struct dentry *dentry, int omode)
+{
+ int err;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL, *dfid = NULL;
+ gid_t gid;
+ char *name;
+ mode_t mode;
+ struct inode *inode;
+ struct p9_qid qid;
+ struct dentry *dir_dentry;
+ struct posix_acl *dacl = NULL, *pacl = NULL;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ err = 0;
+ v9ses = v9fs_inode2v9ses(dir);
+
+ omode |= S_IFDIR;
+ if (dir->i_mode & S_ISGID)
+ omode |= S_ISGID;
+
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ dfid = NULL;
+ goto error;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in mkdir %d\n", err);
+ goto error;
+ }
+ name = (char *) dentry->d_name.name;
+ err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
+ if (err < 0)
+ goto error;
+
+ /* instantiate inode and assign the unopened fid to the dentry */
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ } else {
+ /*
+ * Not in cached mode. No need to populate the
+ * inode with stat. We still need an inode so
+ * that we can set the ACL on the dentry.
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ }
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
+
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
+static int
+v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ int err;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_stat_dotl *st;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ err = -EPERM;
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+ return simple_getattr(mnt, dentry, stat);
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ /* Ask for all the fields in the stat structure. The server will
+ * return whatever it supports.
+ */
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ v9fs_stat2inode_dotl(st, dentry->d_inode);
+ generic_fillattr(dentry->d_inode, stat);
+ /* Change block size to what the server returned */
+ stat->blksize = st->st_blksize;
+
+ kfree(st);
+ return 0;
+}
+
+/**
+ * v9fs_vfs_setattr_dotl - set file metadata
+ * @dentry: file whose metadata to set
+ * @iattr: metadata assignment structure
+ *
+ */
+
+int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+{
+ int retval;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_iattr_dotl p9attr;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+ retval = inode_change_ok(dentry->d_inode, iattr);
+ if (retval)
+ return retval;
+
+ p9attr.valid = iattr->ia_valid;
+ p9attr.mode = iattr->ia_mode;
+ p9attr.uid = iattr->ia_uid;
+ p9attr.gid = iattr->ia_gid;
+ p9attr.size = iattr->ia_size;
+ p9attr.atime_sec = iattr->ia_atime.tv_sec;
+ p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+ p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+ p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+
+ retval = -EPERM;
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ retval = p9_client_setattr(fid, &p9attr);
+ if (retval < 0)
+ return retval;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode)) {
+ retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+ if (retval)
+ return retval;
+ }
+
+ setattr_copy(dentry->d_inode, iattr);
+ mark_inode_dirty(dentry->d_inode);
+ if (iattr->ia_valid & ATTR_MODE) {
+ /* We also want to update ACL when we update mode bits */
+ retval = v9fs_acl_chmod(dentry);
+ if (retval < 0)
+ return retval;
+ }
+ return 0;
+}
+
+/**
+ * v9fs_stat2inode_dotl - populate an inode structure with stat info
+ * @stat: stat structure
+ * @inode: inode to populate
+ *
+ */
+
+void
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+{
+
+ if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
+ inode->i_atime.tv_sec = stat->st_atime_sec;
+ inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ inode->i_mtime.tv_sec = stat->st_mtime_sec;
+ inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode->i_ctime.tv_sec = stat->st_ctime_sec;
+ inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode->i_uid = stat->st_uid;
+ inode->i_gid = stat->st_gid;
+ inode->i_nlink = stat->st_nlink;
+ inode->i_mode = stat->st_mode;
+ inode->i_rdev = new_decode_dev(stat->st_rdev);
+
+ if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+ i_size_write(inode, stat->st_size);
+ inode->i_blocks = stat->st_blocks;
+ } else {
+ if (stat->st_result_mask & P9_STATS_ATIME) {
+ inode->i_atime.tv_sec = stat->st_atime_sec;
+ inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_MTIME) {
+ inode->i_mtime.tv_sec = stat->st_mtime_sec;
+ inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_CTIME) {
+ inode->i_ctime.tv_sec = stat->st_ctime_sec;
+ inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_UID)
+ inode->i_uid = stat->st_uid;
+ if (stat->st_result_mask & P9_STATS_GID)
+ inode->i_gid = stat->st_gid;
+ if (stat->st_result_mask & P9_STATS_NLINK)
+ inode->i_nlink = stat->st_nlink;
+ if (stat->st_result_mask & P9_STATS_MODE) {
+ inode->i_mode = stat->st_mode;
+ if ((S_ISBLK(inode->i_mode)) ||
+ (S_ISCHR(inode->i_mode)))
+ init_special_inode(inode, inode->i_mode,
+ inode->i_rdev);
+ }
+ if (stat->st_result_mask & P9_STATS_RDEV)
+ inode->i_rdev = new_decode_dev(stat->st_rdev);
+ if (stat->st_result_mask & P9_STATS_SIZE)
+ i_size_write(inode, stat->st_size);
+ if (stat->st_result_mask & P9_STATS_BLOCKS)
+ inode->i_blocks = stat->st_blocks;
+ }
+ if (stat->st_result_mask & P9_STATS_GEN)
+ inode->i_generation = stat->st_gen;
+
+ /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
+ * because the inode structure does not have fields for them.
+ */
+}
+
+static int
+v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *dfid;
+ struct p9_fid *fid = NULL;
+ struct inode *inode;
+ struct p9_qid qid;
+ char *name;
+ int err;
+ gid_t gid;
+
+ name = (char *) dentry->d_name.name;
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
+ dir->i_ino, name, symname);
+ v9ses = v9fs_inode2v9ses(dir);
+
+ dfid = v9fs_fid_lookup(dentry->d_parent);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ return err;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+
+ /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
+ err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
+
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+ goto error;
+ }
+
+ if (v9ses->cache) {
+ /* Now walk from the parent so we can get an unopened fid. */
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ /* instantiate inode and assign the unopened fid to dentry */
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ } else {
+ /* Not in cached mode. No need to populate inode with stat */
+ inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ }
+
+error:
+ if (fid)
+ p9_client_clunk(fid);
+
+ return err;
+}
+
+/**
+ * v9fs_vfs_link_dotl - create a hardlink for dotl
+ * @old_dentry: dentry for file to link to
+ * @dir: inode destination for new link
+ * @dentry: dentry for link
+ *
+ */
+
+static int
+v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ int err;
+ struct p9_fid *dfid, *oldfid;
+ char *name;
+ struct v9fs_session_info *v9ses;
+ struct dentry *dir_dentry;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+ dir->i_ino, old_dentry->d_name.name,
+ dentry->d_name.name);
+
+ v9ses = v9fs_inode2v9ses(dir);
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid))
+ return PTR_ERR(dfid);
+
+ oldfid = v9fs_fid_lookup(old_dentry);
+ if (IS_ERR(oldfid))
+ return PTR_ERR(oldfid);
+
+ name = (char *) dentry->d_name.name;
+
+ err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
+
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+ return err;
+ }
+
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ /* Get the latest stat info from server. */
+ struct p9_fid *fid;
+ struct p9_stat_dotl *st;
+
+ fid = v9fs_fid_lookup(old_dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ v9fs_stat2inode_dotl(st, old_dentry->d_inode);
+
+ kfree(st);
+ } else {
+ /* Caching disabled. No need to get up-to-date stat info.
+ * This dentry will be released immediately. So, just hold the
+ * inode
+ */
+ ihold(old_dentry->d_inode);
+ }
+ d_instantiate(dentry, old_dentry->d_inode);
+
+ return err;
+}
+
+/**
+ * v9fs_vfs_mknod_dotl - create a special file
+ * @dir: inode destination for new link
+ * @dentry: dentry for file
+ * @mode: mode for creation
+ * @rdev: device associated with special file
+ *
+ */
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+ dev_t rdev)
+{
+ int err;
+ char *name;
+ mode_t mode;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL, *dfid = NULL;
+ struct inode *inode;
+ gid_t gid;
+ struct p9_qid qid;
+ struct dentry *dir_dentry;
+ struct posix_acl *dacl = NULL, *pacl = NULL;
+
+ P9_DPRINTK(P9_DEBUG_VFS,
+ " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+ dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+
+ v9ses = v9fs_inode2v9ses(dir);
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ dfid = NULL;
+ goto error;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in mknod %d\n", err);
+ goto error;
+ }
+ name = (char *) dentry->d_name.name;
+
+ err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
+ if (err < 0)
+ goto error;
+
+ /* instantiate inode and assign the unopened fid to the dentry */
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ } else {
+ /*
+ * Not in cached mode. No need to populate inode with stat.
+ * The socket syscall returns a fd, so we need to instantiate.
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ d_instantiate(dentry, inode);
+ }
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @nd: nameidata
+ *
+ */
+
+static void *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+{
+ int retval;
+ struct p9_fid *fid;
+ char *link = __getname();
+ char *target;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+
+ if (!link) {
+ link = ERR_PTR(-ENOMEM);
+ goto ndset;
+ }
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid)) {
+ __putname(link);
+ link = ERR_PTR(PTR_ERR(fid));
+ goto ndset;
+ }
+ retval = p9_client_readlink(fid, &target);
+ if (!retval) {
+ strcpy(link, target);
+ kfree(target);
+ goto ndset;
+ }
+ __putname(link);
+ link = ERR_PTR(retval);
+ndset:
+ nd_set_link(nd, link);
+ return NULL;
+}
+
+const struct inode_operations v9fs_dir_inode_operations_dotl = {
+ .create = v9fs_vfs_create_dotl,
+ .lookup = v9fs_vfs_lookup,
+ .link = v9fs_vfs_link_dotl,
+ .symlink = v9fs_vfs_symlink_dotl,
+ .unlink = v9fs_vfs_unlink,
+ .mkdir = v9fs_vfs_mkdir_dotl,
+ .rmdir = v9fs_vfs_rmdir,
+ .mknod = v9fs_vfs_mknod_dotl,
+ .rename = v9fs_vfs_rename,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+ .check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_file_inode_operations_dotl = {
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+ .check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+ .readlink = generic_readlink,
+ .follow_link = v9fs_vfs_follow_link_dotl,
+ .put_link = v9fs_vfs_put_link,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+};
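
A recurring pattern in the new file above is the group-inheritance rule of
v9fs_get_fsgid_for_create(): a setgid parent directory hands its gid to new
objects, otherwise the caller's filesystem gid is used. A minimal standalone
sketch of just that rule, with made-up test values (the helper name mirrors
the kernel one, but this program is illustrative only, not kernel code):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    /* If the parent directory has the set-group-ID bit, the new object
     * inherits the directory's gid; otherwise it gets the caller's
     * filesystem gid. */
    static gid_t gid_for_create(mode_t dir_mode, gid_t dir_gid, gid_t fsgid)
    {
        if (dir_mode & S_ISGID)
            return dir_gid;
        return fsgid;
    }

    int main(void)
    {
        /* plain directory: creator's fsgid wins */
        printf("%u\n", (unsigned)gid_for_create(0755, 100, 1000));  /* 1000 */
        /* setgid directory: directory's gid is inherited */
        printf("%u\n", (unsigned)gid_for_create(02755, 100, 1000)); /* 100 */
        return 0;
    }
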
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 43ec7df8433..d288773871b 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -133,7 +133,7 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
"p9_client_xattrcreate failed %d\n", retval);
goto error;
}
- msize = fid->clnt->msize;;
+ msize = fid->clnt->msize;
while (value_len) {
if (value_len > (msize - P9_IOHDRSZ))
write_count = msize - P9_IOHDRSZ;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2709b34206a..47cda410b54 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,21 +28,30 @@
typedef struct ext2_dir_entry_2 ext2_dirent;
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
+#if (PAGE_CACHE_SIZE >= 65536)
if (len == EXT2_MAX_REC_LEN)
return 1 << 16;
+#endif
return len;
}
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
+#if (PAGE_CACHE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT2_MAX_REC_LEN);
else
BUG_ON(len > (1 << 16));
+#endif
return cpu_to_le16(len);
}
@@ -129,15 +138,15 @@ static void ext2_check_page(struct page *page, int quiet)
p = (ext2_dirent *)(kaddr + offs);
rec_len = ext2_rec_len_from_disk(p->rec_len);
- if (rec_len < EXT2_DIR_REC_LEN(1))
+ if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
goto Eshort;
- if (rec_len & 3)
+ if (unlikely(rec_len & 3))
goto Ealign;
- if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+ if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
goto Enamelen;
- if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
goto Espan;
- if (le32_to_cpu(p->inode) > max_inumber)
+ if (unlikely(le32_to_cpu(p->inode) > max_inumber))
goto Einumber;
}
if (offs != limit)
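
The rec_len hunks above exist because a directory entry spanning a full 64KiB
block cannot be represented in a __le16, so 1 << 16 is stored on disk as
EXT2_MAX_REC_LEN and decoded back on read; on architectures where the page
cache can never hold a 64KiB block, the test is dead code and is compiled
out. A userspace model of that encoding, assuming EXT2_MAX_REC_LEN is
((1 << 16) - 1) as in the ext2 headers:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXT2_MAX_REC_LEN ((1 << 16) - 1)

    static uint16_t rec_len_to_disk(unsigned len)
    {
        if (len == (1 << 16))
            return EXT2_MAX_REC_LEN;   /* 64KiB sentinel */
        assert(len < (1 << 16));       /* kernel BUG_ON()s here */
        return (uint16_t)len;
    }

    static unsigned rec_len_from_disk(uint16_t dlen)
    {
        if (dlen == EXT2_MAX_REC_LEN)
            return 1 << 16;
        return dlen;
    }

    int main(void)
    {
        unsigned len = 1 << 16;        /* one block-spanning entry */
        uint16_t disk = rec_len_to_disk(len);
        printf("%u -> 0x%04x -> %u\n", len, disk, rec_len_from_disk(disk));
        return 0;
    }
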
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f8aecd2e329..2e1d8341d82 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -67,7 +67,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
inode = NULL;
if (ino) {
inode = ext2_iget(dir->i_sb, ino);
- if (unlikely(IS_ERR(inode))) {
+ if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ESTALE) {
ext2_error(dir->i_sb, __func__,
"deleted inode referenced: %lu",
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index e0c6380ff99..7731695e65d 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -43,9 +43,10 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
-void ext2_error (struct super_block * sb, const char * function,
- const char * fmt, ...)
+void ext2_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = sbi->s_es;
@@ -59,9 +60,13 @@ void ext2_error (struct super_block * sb, const char * function,
}
va_start(args, fmt);
- printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
+ sb->s_id, function, &vaf);
+
va_end(args);
if (test_opt(sb, ERRORS_PANIC))
@@ -76,12 +81,16 @@ void ext2_error (struct super_block * sb, const char * function,
void ext2_msg(struct super_block *sb, const char *prefix,
const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
va_end(args);
}
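
The ext2_error()/ext2_msg() hunks above replace three separate printk calls
with a single printk using the %pV extension and struct va_format, so a
message from another CPU cannot be interleaved mid-line. %pV is
kernel-only, but the idea can be modelled in userspace by formatting the
variadic part into a buffer first and emitting everything in one call (a
sketch; names are illustrative):

    #include <stdarg.h>
    #include <stdio.h>

    static void fs_error(const char *dev, const char *function,
                         const char *fmt, ...)
    {
        char msg[256];
        va_list args;

        va_start(args, fmt);
        vsnprintf(msg, sizeof(msg), fmt, args);
        va_end(args);

        /* one write instead of printk + vprintk + printk */
        fprintf(stderr, "EXT2-fs (%s): error: %s: %s\n", dev, function, msg);
    }

    int main(void)
    {
        fs_error("sda1", "ext2_lookup", "deleted inode referenced: %lu", 42UL);
        return 0;
    }
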
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index f84700be327..c2e4dce984d 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -199,14 +199,6 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
goto found;
entry = next;
}
- /* Check the remaining name entries */
- while (!IS_LAST_ENTRY(entry)) {
- struct ext2_xattr_entry *next =
- EXT2_XATTR_NEXT(entry);
- if ((char *)next >= end)
- goto bad_block;
- entry = next;
- }
if (ext2_xattr_cache_insert(bh))
ea_idebug(inode, "cache insert failed");
error = -ENODATA;
@@ -355,7 +347,7 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
/*
* ext2_xattr_set()
*
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b3db2264942..045995c8ce5 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,7 @@
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
@@ -39,6 +40,21 @@
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+/*
+ * Calculate the block group number and offset, given a block number
+ */
+static void ext3_get_group_no_and_offset(struct super_block *sb,
+ ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
+{
+ struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+ blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+ if (offsetp)
+ *offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
+ if (blockgrpp)
+ *blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
+}
+
/**
* ext3_get_group_desc() -- load group descriptor from disk
* @sb: super block
@@ -1885,3 +1901,253 @@ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
return ext3_bg_num_gdb_meta(sb,group);
}
+
+/**
+ * ext3_trim_all_free -- trim all free space in an allocation group
+ * @sb: super block for file system
+ * @group: allocation group to trim
+ * @start: first group block to examine
+ * @max: last group block to examine
+ * @gdp: allocation group description structure
+ * @minblocks: minimum extent block count
+ *
+ * ext3_trim_all_free walks through the group's block bitmap searching for
+ * free blocks. When a free block is found, it claims this block and the
+ * consecutive free blocks following it to get the biggest free extent
+ * possible, until it reaches a used block. It then issues a TRIM command
+ * on this extent and frees the extent in the block bitmap. This is
+ * repeated until the whole group has been scanned.
+ */
+ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
+ ext3_grpblk_t start, ext3_grpblk_t max,
+ ext3_grpblk_t minblocks)
+{
+ handle_t *handle;
+ ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
+ ext3_fsblk_t discard_block;
+ struct ext3_sb_info *sbi;
+ struct buffer_head *gdp_bh, *bitmap_bh = NULL;
+ struct ext3_group_desc *gdp;
+ int err = 0, ret = 0;
+
+ /*
+ * We will update one block bitmap, and one group descriptor
+ */
+ handle = ext3_journal_start_sb(sb, 2);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ bitmap_bh = read_block_bitmap(sb, group);
+ if (!bitmap_bh) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ BUFFER_TRACE(bitmap_bh, "getting undo access");
+ err = ext3_journal_get_undo_access(handle, bitmap_bh);
+ if (err)
+ goto err_out;
+
+ gdp = ext3_get_group_desc(sb, group, &gdp_bh);
+ if (!gdp) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ BUFFER_TRACE(gdp_bh, "get_write_access");
+ err = ext3_journal_get_write_access(handle, gdp_bh);
+ if (err)
+ goto err_out;
+
+ free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+ sbi = EXT3_SB(sb);
+
+ /* Walk through the whole group */
+ while (start < max) {
+ start = bitmap_search_next_usable_block(start, bitmap_bh, max);
+ if (start < 0)
+ break;
+ next = start;
+
+ /*
+ * Allocate contiguous free extents by setting bits in the
+ * block bitmap
+ */
+ while (next < max
+ && claim_block(sb_bgl_lock(sbi, group),
+ next, bitmap_bh)) {
+ next++;
+ }
+
+ /* We did not claim any blocks */
+ if (next == start)
+ continue;
+
+ discard_block = (ext3_fsblk_t)start +
+ ext3_group_first_block_no(sb, group);
+
+ /* Update counters */
+ spin_lock(sb_bgl_lock(sbi, group));
+ le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
+ spin_unlock(sb_bgl_lock(sbi, group));
+ percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+
+ /* Do not issue a TRIM on extents smaller than minblocks */
+ if ((next - start) < minblocks)
+ goto free_extent;
+
+ /* Send the TRIM command down to the device */
+ err = sb_issue_discard(sb, discard_block, next - start,
+ GFP_NOFS, 0);
+ count += (next - start);
+free_extent:
+ freed = 0;
+
+ /*
+ * Clear bits in the bitmap
+ */
+ for (bit = start; bit < next; bit++) {
+ BUFFER_TRACE(bitmap_bh, "clear bit");
+ if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
+ bit, bitmap_bh->b_data)) {
+ ext3_error(sb, __func__,
+ "bit already cleared for block "E3FSBLK,
+ (unsigned long)bit);
+ BUFFER_TRACE(bitmap_bh, "bit already cleared");
+ } else {
+ freed++;
+ }
+ }
+
+ /* Update counters */
+ spin_lock(sb_bgl_lock(sbi, group));
+ le16_add_cpu(&gdp->bg_free_blocks_count, freed);
+ spin_unlock(sb_bgl_lock(sbi, group));
+ percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+
+ start = next;
+ if (err < 0) {
+ if (err != -EOPNOTSUPP)
+ ext3_warning(sb, __func__, "Discard command "
+ "returned error %d\n", err);
+ break;
+ }
+
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+
+ /* No more suitable extents */
+ if ((free_blocks - count) < minblocks)
+ break;
+ }
+
+ /* We dirtied the bitmap block */
+ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+ ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
+ if (!err)
+ err = ret;
+
+ /* And the group descriptor block */
+ BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
+ ret = ext3_journal_dirty_metadata(handle, gdp_bh);
+ if (!err)
+ err = ret;
+
+ ext3_debug("trimmed %d blocks in the group %d\n",
+ count, group);
+
+err_out:
+ if (err)
+ count = err;
+ ext3_journal_stop(handle);
+ brelse(bitmap_bh);
+
+ return count;
+}
+
+/**
+ * ext3_trim_fs() -- trim ioctl handler
+ * @sb: superblock for filesystem
+ * @start: first byte to trim
+ * @len: number of bytes to trim from start
+ * @minlen: minimum extent length in bytes
+ *
+ * ext3_trim_fs goes through all allocation groups containing bytes from
+ * start to start+len. For each such group, ext3_trim_all_free() is
+ * invoked to trim all free space.
+ */
+int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+ ext3_grpblk_t last_block, first_block, free_blocks;
+ unsigned long first_group, last_group;
+ unsigned long group, ngroups;
+ struct ext3_group_desc *gdp;
+ struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+ uint64_t start, len, minlen, trimmed;
+ ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
+ int ret = 0;
+
+ start = range->start >> sb->s_blocksize_bits;
+ len = range->len >> sb->s_blocksize_bits;
+ minlen = range->minlen >> sb->s_blocksize_bits;
+ trimmed = 0;
+
+ if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+ return -EINVAL;
+ if (start >= max_blks)
+ goto out;
+ if (start < le32_to_cpu(es->s_first_data_block)) {
+ len -= le32_to_cpu(es->s_first_data_block) - start;
+ start = le32_to_cpu(es->s_first_data_block);
+ }
+ if (start + len > max_blks)
+ len = max_blks - start;
+
+ ngroups = EXT3_SB(sb)->s_groups_count;
+ smp_rmb();
+
+ /* Determine first and last group to examine based on start and len */
+ ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
+ &first_group, &first_block);
+ ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+ &last_group, &last_block);
+ last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+ last_block = EXT3_BLOCKS_PER_GROUP(sb);
+
+ if (first_group > last_group)
+ return -EINVAL;
+
+ for (group = first_group; group <= last_group; group++) {
+ gdp = ext3_get_group_desc(sb, group, NULL);
+ if (!gdp)
+ break;
+
+ free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+ if (free_blocks < minlen)
+ continue;
+
+ if (len >= EXT3_BLOCKS_PER_GROUP(sb))
+ len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
+ else
+ last_block = first_block + len;
+
+ ret = ext3_trim_all_free(sb, group, first_block,
+ last_block, minlen);
+ if (ret < 0)
+ break;
+
+ trimmed += ret;
+ first_block = 0;
+ }
+
+ if (ret >= 0)
+ ret = 0;
+
+out:
+ range->len = trimmed * sb->s_blocksize;
+
+ return ret;
+}
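
Both trim functions above lean on ext3_get_group_no_and_offset(), which is
plain integer arithmetic: subtract the first data block, then divide and
take the remainder by the group size. A standalone sketch with illustrative
numbers (8192 blocks per group and first data block 1, as on a 1KiB-block
ext3 filesystem):

    #include <stdio.h>

    #define BLOCKS_PER_GROUP 8192UL
    #define FIRST_DATA_BLOCK 1UL

    int main(void)
    {
        unsigned long blocknr = 20000;
        unsigned long rel = blocknr - FIRST_DATA_BLOCK;
        unsigned long group = rel / BLOCKS_PER_GROUP;   /* 2 */
        unsigned long offset = rel % BLOCKS_PER_GROUP;  /* 3615 */

        printf("block %lu -> group %lu, offset %lu\n", blocknr, group, offset);
        return 0;
    }
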
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index e2e72c367cf..34f0a072b93 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -69,25 +69,26 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
const char * error_msg = NULL;
const int rlen = ext3_rec_len_from_disk(de->rec_len);
- if (rlen < EXT3_DIR_REC_LEN(1))
+ if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
+ else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
- else if (rlen < EXT3_DIR_REC_LEN(de->name_len))
+ else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
error_msg = "directory entry across blocks";
- else if (le32_to_cpu(de->inode) >
- le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+ else if (unlikely(le32_to_cpu(de->inode) >
+ le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
- if (error_msg != NULL)
+ if (unlikely(error_msg != NULL))
ext3_error (dir->i_sb, function,
"bad entry in directory #%lu: %s - "
"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
dir->i_ino, error_msg, offset,
(unsigned long) le32_to_cpu(de->inode),
rlen, de->name_len);
+
return error_msg == NULL ? 1 : 0;
}
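
The checks that this hunk wraps in unlikely() are pure invariants on the
on-disk record length: it must cover the minimal entry, be 4-byte aligned,
fit the name, and not cross a block boundary. A standalone validator
replicating them (the 8-byte fixed header and 4-byte rounding match
EXT3_DIR_REC_LEN(); the struct is a simplified stand-in for
struct ext3_dir_entry_2):

    #include <stdio.h>

    #define DIR_REC_LEN(name_len) (((name_len) + 8 + 3) & ~3U)

    struct dirent2 {
        unsigned rec_len;        /* already decoded from __le16 */
        unsigned char name_len;
    };

    static const char *check_entry(const struct dirent2 *de,
                                   unsigned offs, unsigned blocksize)
    {
        if (de->rec_len < DIR_REC_LEN(1))
            return "rec_len is smaller than minimal";
        if (de->rec_len % 4 != 0)
            return "rec_len % 4 != 0";
        if (de->rec_len < DIR_REC_LEN(de->name_len))
            return "rec_len is too small for name_len";
        if (offs + de->rec_len > blocksize)
            return "directory entry across blocks";
        return NULL;
    }

    int main(void)
    {
        struct dirent2 de = { .rec_len = 16, .name_len = 5 };
        const char *msg = check_entry(&de, 1020, 1024);

        /* prints "directory entry across blocks" */
        printf("%s\n", msg ? msg : "entry ok");
        return 0;
    }
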
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a9580617edd..ae94f6d949f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2145,13 +2145,15 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, bh);
+ if (ext3_journal_dirty_metadata(handle, bh))
+ return;
}
ext3_mark_inode_dirty(handle, inode);
truncate_restart_transaction(handle, inode);
if (bh) {
BUFFER_TRACE(bh, "retaking write access");
- ext3_journal_get_write_access(handle, bh);
+ if (ext3_journal_get_write_access(handle, bh))
+ return;
}
}
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 88974814783..fc080dd561f 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -276,7 +276,29 @@ group_add_out:
mnt_drop_write(filp->f_path.mnt);
return err;
}
+ case FITRIM: {
+ struct super_block *sb = inode->i_sb;
+ struct fstrim_range range;
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&range, (struct fstrim_range *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ ret = ext3_trim_fs(sb, &range);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user((struct fstrim_range *)arg, &range,
+ sizeof(range)))
+ return -EFAULT;
+
+ return 0;
+ }
default:
return -ENOTTY;
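
The new FITRIM case is driven from userspace via ioctl(2) on any open file
descriptor for the filesystem; this is essentially what fstrim(8) does. A
minimal caller, assuming kernel headers that provide FITRIM and a mount
point used here only as a placeholder (CAP_SYS_ADMIN is required, matching
the capable() check above):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>        /* FITRIM, struct fstrim_range */

    int main(void)
    {
        struct fstrim_range range;
        int fd = open("/mnt/ext3", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&range, 0, sizeof(range));
        range.start = 0;
        range.len = UINT64_MAX;  /* trim the whole filesystem */
        range.minlen = 0;

        if (ioctl(fd, FITRIM, &range) < 0) {
            perror("FITRIM");
            close(fd);
            return 1;
        }
        /* the kernel writes back how many bytes were actually trimmed */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
    }
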
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index bce9dce639b..b27ba71810e 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -858,6 +858,7 @@ static struct buffer_head *ext3_find_entry(struct inode *dir,
struct buffer_head * bh_use[NAMEI_RA_SIZE];
struct buffer_head * bh, *ret = NULL;
unsigned long start, block, b;
+ const u8 *name = entry->name;
int ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
int ra_ptr = 0; /* Current index into readahead
@@ -871,6 +872,16 @@ static struct buffer_head *ext3_find_entry(struct inode *dir,
namelen = entry->len;
if (namelen > EXT3_NAME_LEN)
return NULL;
+ if ((namelen <= 2) && (name[0] == '.') &&
+ (name[1] == '.' || name[1] == 0)) {
+ /*
+ * "." or ".." will only be in the first block
+ * NFS may look up ".."; "." should be handled by the VFS
+ */
+ block = start = 0;
+ nblocks = 1;
+ goto restart;
+ }
if (is_dx(dir)) {
bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
/*
@@ -961,55 +972,35 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
int *err)
{
- struct super_block * sb;
+ struct super_block *sb = dir->i_sb;
struct dx_hash_info hinfo;
- u32 hash;
struct dx_frame frames[2], *frame;
- struct ext3_dir_entry_2 *de, *top;
struct buffer_head *bh;
unsigned long block;
int retval;
- int namelen = entry->len;
- const u8 *name = entry->name;
- sb = dir->i_sb;
- /* NFS may look up ".." - look at dx_root directory block */
- if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
- if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
- return NULL;
- } else {
- frame = frames;
- frame->bh = NULL; /* for dx_release() */
- frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
- dx_set_block(frame->at, 0); /* dx_root block is 0 */
- }
- hash = hinfo.hash;
+ if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
+ return NULL;
do {
block = dx_get_block(frame->at);
if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
goto errout;
- de = (struct ext3_dir_entry_2 *) bh->b_data;
- top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
- EXT3_DIR_REC_LEN(0));
- for (; de < top; de = ext3_next_entry(de)) {
- int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
- + ((char *) de - bh->b_data);
-
- if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto errout;
- }
- if (ext3_match(namelen, name, de)) {
- *res_dir = de;
- dx_release(frames);
- return bh;
- }
+ retval = search_dirblock(bh, dir, entry,
+ block << EXT3_BLOCK_SIZE_BITS(sb),
+ res_dir);
+ if (retval == 1) {
+ dx_release(frames);
+ return bh;
}
- brelse (bh);
+ brelse(bh);
+ if (retval == -1) {
+ *err = ERR_BAD_DX_DIR;
+ goto errout;
+ }
+
/* Check to see if we should continue to search */
- retval = ext3_htree_next_block(dir, hash, frame,
+ retval = ext3_htree_next_block(dir, hinfo.hash, frame,
frames, NULL);
if (retval < 0) {
ext3_warning(sb, __func__,
@@ -1047,7 +1038,7 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str
return ERR_PTR(-EIO);
}
inode = ext3_iget(dir->i_sb, ino);
- if (unlikely(IS_ERR(inode))) {
+ if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ESTALE) {
ext3_error(dir->i_sb, __func__,
"deleted inode referenced: %lu",
@@ -1607,7 +1598,9 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (err)
goto journal_error;
}
- ext3_journal_dirty_metadata(handle, frames[0].bh);
+ err = ext3_journal_dirty_metadata(handle, frames[0].bh);
+ if (err)
+ goto journal_error;
}
de = do_split(handle, dir, &bh, frame, &hinfo, &err);
if (!de)
@@ -1644,8 +1637,13 @@ static int ext3_delete_entry (handle_t *handle,
if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
return -EIO;
if (de == de_del) {
+ int err;
+
BUFFER_TRACE(bh, "get_write_access");
- ext3_journal_get_write_access(handle, bh);
+ err = ext3_journal_get_write_access(handle, bh);
+ if (err)
+ goto journal_error;
+
if (pde)
pde->rec_len = ext3_rec_len_to_disk(
ext3_rec_len_from_disk(pde->rec_len) +
@@ -1654,7 +1652,12 @@ static int ext3_delete_entry (handle_t *handle,
de->inode = 0;
dir->i_version++;
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, bh);
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err) {
+journal_error:
+ ext3_std_error(dir->i_sb, err);
+ return err;
+ }
return 0;
}
i += ext3_rec_len_from_disk(de->rec_len);
@@ -1762,7 +1765,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
{
handle_t *handle;
struct inode * inode;
- struct buffer_head * dir_block;
+ struct buffer_head * dir_block = NULL;
struct ext3_dir_entry_2 * de;
int err, retries = 0;
@@ -1790,15 +1793,14 @@ retry:
inode->i_fop = &ext3_dir_operations;
inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
dir_block = ext3_bread (handle, inode, 0, 1, &err);
- if (!dir_block) {
- drop_nlink(inode); /* is this nlink == 0? */
- unlock_new_inode(inode);
- ext3_mark_inode_dirty(handle, inode);
- iput (inode);
- goto out_stop;
- }
+ if (!dir_block)
+ goto out_clear_inode;
+
BUFFER_TRACE(dir_block, "get_write_access");
- ext3_journal_get_write_access(handle, dir_block);
+ err = ext3_journal_get_write_access(handle, dir_block);
+ if (err)
+ goto out_clear_inode;
+
de = (struct ext3_dir_entry_2 *) dir_block->b_data;
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
@@ -1814,11 +1816,16 @@ retry:
ext3_set_de_type(dir->i_sb, de, S_IFDIR);
inode->i_nlink = 2;
BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, dir_block);
- brelse (dir_block);
- ext3_mark_inode_dirty(handle, inode);
- err = ext3_add_entry (handle, dentry, inode);
+ err = ext3_journal_dirty_metadata(handle, dir_block);
+ if (err)
+ goto out_clear_inode;
+
+ err = ext3_mark_inode_dirty(handle, inode);
+ if (!err)
+ err = ext3_add_entry (handle, dentry, inode);
+
if (err) {
+out_clear_inode:
inode->i_nlink = 0;
unlock_new_inode(inode);
ext3_mark_inode_dirty(handle, inode);
@@ -1827,10 +1834,14 @@ retry:
}
inc_nlink(dir);
ext3_update_dx_flag(dir);
- ext3_mark_inode_dirty(handle, dir);
+ err = ext3_mark_inode_dirty(handle, dir);
+ if (err)
+ goto out_clear_inode;
+
d_instantiate(dentry, inode);
unlock_new_inode(inode);
out_stop:
+ brelse(dir_block);
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
@@ -2353,7 +2364,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
goto end_rename;
} else {
BUFFER_TRACE(new_bh, "get write access");
- ext3_journal_get_write_access(handle, new_bh);
+ retval = ext3_journal_get_write_access(handle, new_bh);
+ if (retval)
+ goto journal_error;
new_de->inode = cpu_to_le32(old_inode->i_ino);
if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
EXT3_FEATURE_INCOMPAT_FILETYPE))
@@ -2362,7 +2375,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, new_bh);
+ retval = ext3_journal_dirty_metadata(handle, new_bh);
+ if (retval)
+ goto journal_error;
brelse(new_bh);
new_bh = NULL;
}
@@ -2411,10 +2426,17 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
ext3_update_dx_flag(old_dir);
if (dir_bh) {
BUFFER_TRACE(dir_bh, "get_write_access");
- ext3_journal_get_write_access(handle, dir_bh);
+ retval = ext3_journal_get_write_access(handle, dir_bh);
+ if (retval)
+ goto journal_error;
PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle, dir_bh);
+ retval = ext3_journal_dirty_metadata(handle, dir_bh);
+ if (retval) {
+journal_error:
+ ext3_std_error(new_dir->i_sb, retval);
+ goto end_rename;
+ }
drop_nlink(old_dir);
if (new_inode) {
drop_nlink(new_inode);
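The reworked ext3_mkdir() above leans on brelse() accepting NULL: dir_block starts as NULL, so every exit, early or late, can fall through to the single brelse() at out_stop. A sketch of the idiom, assuming a hypothetical my_read_block():

static int single_release_path(struct inode *inode)
{
        struct buffer_head *bh = NULL;
        int err = 0;

        bh = my_read_block(inode);      /* hypothetical; may return NULL */
        if (!bh) {
                err = -EIO;
                goto out;
        }
        /* ... work with bh ... */
out:
        brelse(bh);                     /* brelse(NULL) is a no-op */
        return err;
}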
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index e746d30b123..108b142e11e 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -249,7 +249,11 @@ static int setup_new_group_blocks(struct super_block *sb,
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
unlock_buffer(gdb);
- ext3_journal_dirty_metadata(handle, gdb);
+ err = ext3_journal_dirty_metadata(handle, gdb);
+ if (err) {
+ brelse(gdb);
+ goto exit_bh;
+ }
ext3_set_bit(bit, bh->b_data);
brelse(gdb);
}
@@ -269,7 +273,11 @@ static int setup_new_group_blocks(struct super_block *sb,
err = PTR_ERR(gdb);
goto exit_bh;
}
- ext3_journal_dirty_metadata(handle, gdb);
+ err = ext3_journal_dirty_metadata(handle, gdb);
+ if (err) {
+ brelse(gdb);
+ goto exit_bh;
+ }
ext3_set_bit(bit, bh->b_data);
brelse(gdb);
}
@@ -295,7 +303,11 @@ static int setup_new_group_blocks(struct super_block *sb,
err = PTR_ERR(it);
goto exit_bh;
}
- ext3_journal_dirty_metadata(handle, it);
+ err = ext3_journal_dirty_metadata(handle, it);
+ if (err) {
+ brelse(it);
+ goto exit_bh;
+ }
brelse(it);
ext3_set_bit(bit, bh->b_data);
}
@@ -306,7 +318,9 @@ static int setup_new_group_blocks(struct super_block *sb,
mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
bh->b_data);
- ext3_journal_dirty_metadata(handle, bh);
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err)
+ goto exit_bh;
brelse(bh);
/* Mark unused entries in inode bitmap used */
@@ -319,7 +333,7 @@ static int setup_new_group_blocks(struct super_block *sb,
mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
bh->b_data);
- ext3_journal_dirty_metadata(handle, bh);
+ err = ext3_journal_dirty_metadata(handle, bh);
exit_bh:
brelse(bh);
@@ -503,12 +517,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
* reserved inode, and will become GDT blocks (primary and backup).
*/
data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
- ext3_journal_dirty_metadata(handle, dind);
+ err = ext3_journal_dirty_metadata(handle, dind);
+ if (err)
+ goto exit_group_desc;
brelse(dind);
+ dind = NULL;
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
- ext3_mark_iloc_dirty(handle, inode, &iloc);
+ err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+ if (err)
+ goto exit_group_desc;
memset((*primary)->b_data, 0, sb->s_blocksize);
- ext3_journal_dirty_metadata(handle, *primary);
+ err = ext3_journal_dirty_metadata(handle, *primary);
+ if (err)
+ goto exit_group_desc;
o_group_desc = EXT3_SB(sb)->s_group_desc;
memcpy(n_group_desc, o_group_desc,
@@ -519,10 +540,14 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
kfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
- ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ if (err)
+ goto exit_inode;
return 0;
+exit_group_desc:
+ kfree(n_group_desc);
exit_inode:
//ext3_journal_release_buffer(handle, iloc.bh);
brelse(iloc.bh);
@@ -706,16 +731,20 @@ static void update_backups(struct super_block *sb,
}
ext3_debug("update metadata backup %#04lx\n",
(unsigned long)bh->b_blocknr);
- if ((err = ext3_journal_get_write_access(handle, bh)))
+ if ((err = ext3_journal_get_write_access(handle, bh))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
memset(bh->b_data + size, 0, rest);
set_buffer_uptodate(bh);
unlock_buffer(bh);
- ext3_journal_dirty_metadata(handle, bh);
+ err = ext3_journal_dirty_metadata(handle, bh);
brelse(bh);
+ if (err)
+ break;
}
if ((err2 = ext3_journal_stop(handle)) && !err)
err = err2;
@@ -922,7 +951,9 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
/* Update the global fs size fields */
sbi->s_groups_count++;
- ext3_journal_dirty_metadata(handle, primary);
+ err = ext3_journal_dirty_metadata(handle, primary);
+ if (err)
+ goto exit_journal;
/* Update the reserved block counts only once the new group is
* active. */
@@ -934,7 +965,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT3_INODES_PER_GROUP(sb));
- ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+ err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
exit_journal:
mutex_unlock(&sbi->s_resize_lock);
@@ -1064,8 +1095,14 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
goto exit_put;
}
es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
- ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
+ if (err) {
+ ext3_warning(sb, __func__,
+ "error %d on journal dirty metadata", err);
+ ext3_journal_stop(handle);
+ goto exit_put;
+ }
ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
o_blocks_count, o_blocks_count + add);
ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
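The update_backups() change above also plugs a buffer_head leak: once ext3_journal_get_write_access() is checked, the loop must brelse(bh) before every break, not only on the success path. A sketch of the rule, with next_backup() and fill_block() as hypothetical helpers:

static int release_on_every_exit(handle_t *handle)
{
        struct buffer_head *bh;
        int err = 0;

        while ((bh = next_backup()) != NULL) {          /* hypothetical */
                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        brelse(bh);     /* the error exit releases too */
                        break;
                }
                fill_block(bh);                         /* hypothetical */
                err = ext3_journal_dirty_metadata(handle, bh);
                brelse(bh);             /* normal-path release */
                if (err)
                        break;
        }
        return err;
}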
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 77ce1616f72..b7d0554631e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -143,12 +143,16 @@ void ext3_journal_abort_handle(const char *caller, const char *err_fn,
void ext3_msg(struct super_block *sb, const char *prefix,
const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
va_end(args);
}
@@ -195,15 +199,20 @@ static void ext3_handle_error(struct super_block *sb)
sb->s_id);
}
-void ext3_error (struct super_block * sb, const char * function,
- const char * fmt, ...)
+void ext3_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
+ sb->s_id, function, &vaf);
+
va_end(args);
ext3_handle_error(sb);
@@ -274,15 +283,20 @@ void __ext3_std_error (struct super_block * sb, const char * function,
* case we take the easy way out and panic immediately.
*/
-void ext3_abort (struct super_block * sb, const char * function,
- const char * fmt, ...)
+void ext3_abort(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
+ sb->s_id, function, &vaf);
+
va_end(args);
if (test_opt(sb, ERRORS_PANIC))
@@ -300,16 +314,20 @@ void ext3_abort (struct super_block * sb, const char * function,
journal_abort(EXT3_SB(sb)->s_journal, -EIO);
}
-void ext3_warning (struct super_block * sb, const char * function,
- const char * fmt, ...)
+void ext3_warning(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ",
- sb->s_id, function);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
+ sb->s_id, function, &vaf);
+
va_end(args);
}
@@ -1848,13 +1866,15 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- if (generic_check_addressable(sb->s_blocksize_bits,
- le32_to_cpu(es->s_blocks_count))) {
+ err = generic_check_addressable(sb->s_blocksize_bits,
+ le32_to_cpu(es->s_blocks_count));
+ if (err) {
ext3_msg(sb, KERN_ERR,
"error: filesystem is too large to mount safely");
if (sizeof(sector_t) < 8)
ext3_msg(sb, KERN_ERR,
"error: CONFIG_LBDAF not enabled");
+ ret = err;
goto failed_mount;
}
@@ -2297,7 +2317,7 @@ static int ext3_load_journal(struct super_block *sb,
EXT3_SB(sb)->s_journal = journal;
ext3_clear_journal_err(sb, es);
- if (journal_devnum &&
+ if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
es->s_journal_dev = cpu_to_le32(journal_devnum);
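The logging helpers in both ext3 (above) and ext4 (later in this patch) replace three printk()/vprintk() calls with a single printk() using the kernel's %pV extension: struct va_format carries the caller's format string and va_list, so the prefix and message are emitted as one atomic line that cannot interleave with output from other CPUs. A minimal sketch:

#include <linux/kernel.h>

static void demo_msg(const char *id, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* one call, one line: prefix plus the caller's message */
        printk(KERN_INFO "demo (%s): %pV\n", id, &vaf);
        va_end(args);
}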
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e69dc6dfaa8..32e6cc23bd9 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -925,7 +925,7 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
/*
* ext3_xattr_set_handle()
*
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 14c3af26c67..adf96b82278 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -592,7 +592,8 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
* Account for the allocated meta blocks. We will never
 * fail EDQUOT for metadata, but we do account for it.
*/
- if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
+ if (!(*errp) &&
+ ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ece76fb6a40..164c56092e5 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -60,9 +60,13 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext4_filetype_table[filetype]);
}
-
+/*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+ * Note: this is the opposite of what ext2 and ext3 historically returned...
+ */
int __ext4_check_dir_entry(const char *function, unsigned int line,
- struct inode *dir,
+ struct inode *dir, struct file *filp,
struct ext4_dir_entry_2 *de,
struct buffer_head *bh,
unsigned int offset)
@@ -71,26 +75,37 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
const int rlen = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
- if (rlen < EXT4_DIR_REC_LEN(1))
+ if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
+ else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
- else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+ else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ else if (unlikely(((char *) de - bh->b_data) + rlen >
+ dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
- else if (le32_to_cpu(de->inode) >
- le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+ else if (unlikely(le32_to_cpu(de->inode) >
+ le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
+ else
+ return 0;
- if (error_msg != NULL)
- ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - "
- "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset%bh->b_size), offset,
- le32_to_cpu(de->inode),
- rlen, de->name_len);
- return error_msg == NULL ? 1 : 0;
+ if (filp)
+ ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
+ "bad entry in directory: %s - offset=%u(%u), "
+ "inode=%u, rec_len=%d, name_len=%d",
+ error_msg, (unsigned) (offset%bh->b_size),
+ offset, le32_to_cpu(de->inode),
+ rlen, de->name_len);
+ else
+ ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
+ "bad entry in directory: %s - offset=%u(%u), "
+ "inode=%u, rec_len=%d, name_len=%d",
+ error_msg, (unsigned) (offset%bh->b_size),
+ offset, le32_to_cpu(de->inode),
+ rlen, de->name_len);
+
+ return 1;
}
static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@ static int ext4_readdir(struct file *filp,
*/
if (!bh) {
if (!dir_has_error) {
- EXT4_ERROR_INODE(inode, "directory "
- "contains a hole at offset %Lu",
+ EXT4_ERROR_FILE(filp, 0,
+ "directory contains a "
+ "hole at offset %llu",
(unsigned long long) filp->f_pos);
dir_has_error = 1;
}
@@ -194,8 +210,8 @@ revalidate:
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
- if (!ext4_check_dir_entry(inode, de,
- bh, offset)) {
+ if (ext4_check_dir_entry(inode, filp, de,
+ bh, offset)) {
/*
* On error, skip the f_pos to the next block
*/
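Beyond the inverted return convention (0 = OK, nonzero = problem), note that the ext4_check_dir_entry() macro in ext4.h wraps the whole call in unlikely(), so every caller's if (ext4_check_dir_entry(...)) branch is hinted cold without repeating the annotation at each call site. A simplified sketch of that macro shape (names here are hypothetical):

#define my_check_entry(dir, de, bh, offset)                             \
        unlikely(__my_check_entry(__func__, __LINE__, (dir),            \
                                  (de), (bh), (offset)))

static int caller(struct inode *dir, struct ext4_dir_entry_2 *de,
                  struct buffer_head *bh, unsigned int offset)
{
        if (my_check_entry(dir, de, bh, offset))
                return -EIO;    /* corruption already logged */
        return 0;
}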
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 94ce3d7a1c4..bab2387fba4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -62,8 +62,8 @@
#define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \
ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
-#define EXT4_ERROR_FILE(file, fmt, a...) \
- ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
+#define EXT4_ERROR_FILE(file, block, fmt, a...) \
+ ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
/* data type for block offset of block group */
typedef int ext4_grpblk_t;
@@ -561,22 +561,6 @@ struct ext4_new_group_data {
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
#endif
-
-/*
- * Mount options
- */
-struct ext4_mount_options {
- unsigned long s_mount_opt;
- uid_t s_resuid;
- gid_t s_resgid;
- unsigned long s_commit_interval;
- u32 s_min_batch_time, s_max_batch_time;
-#ifdef CONFIG_QUOTA
- int s_jquota_fmt;
- char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
/* Max physical block we can address w/o extents */
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
@@ -709,6 +693,8 @@ do { \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \
ext4_decode_extra_time(&(inode)->xtime, \
raw_inode->xtime ## _extra); \
+ else \
+ (inode)->xtime.tv_nsec = 0; \
} while (0)
#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
@@ -719,6 +705,8 @@ do { \
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
ext4_decode_extra_time(&(einode)->xtime, \
raw_inode->xtime ## _extra); \
+ else \
+ (einode)->xtime.tv_nsec = 0; \
} while (0)
#define i_disk_version osd1.linux1.l_i_version
@@ -750,12 +738,13 @@ do { \
/*
* storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
*/
struct ext4_ext_cache {
ext4_fsblk_t ec_start;
ext4_lblk_t ec_block;
__u32 ec_len; /* must be 32bit to return holes */
- __u32 ec_type;
};
/*
@@ -774,10 +763,12 @@ struct ext4_inode_info {
* near to their parent directory's inode.
*/
ext4_group_t i_block_group;
+ ext4_lblk_t i_dir_start_lookup;
+#if (BITS_PER_LONG < 64)
unsigned long i_state_flags; /* Dynamic state flags */
+#endif
unsigned long i_flags;
- ext4_lblk_t i_dir_start_lookup;
#ifdef CONFIG_EXT4_FS_XATTR
/*
* Extended attributes can be read independently of the main file
@@ -820,7 +811,7 @@ struct ext4_inode_info {
*/
struct rw_semaphore i_data_sem;
struct inode vfs_inode;
- struct jbd2_inode jinode;
+ struct jbd2_inode *jinode;
struct ext4_ext_cache i_cached_extent;
/*
@@ -840,14 +831,12 @@ struct ext4_inode_info {
unsigned int i_reserved_data_blocks;
unsigned int i_reserved_meta_blocks;
unsigned int i_allocated_meta_blocks;
- unsigned short i_delalloc_reserved_flag;
- sector_t i_da_metadata_calc_last_lblock;
+ ext4_lblk_t i_da_metadata_calc_last_lblock;
int i_da_metadata_calc_len;
/* on-disk additional length */
__u16 i_extra_isize;
- spinlock_t i_block_reservation_lock;
#ifdef CONFIG_QUOTA
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
@@ -856,9 +845,11 @@ struct ext4_inode_info {
/* completed IOs that might need unwritten extents handling */
struct list_head i_completed_io_list;
spinlock_t i_completed_io_lock;
+ atomic_t i_ioend_count; /* Number of outstanding io_end structs */
/* current io_end structure for async DIO write*/
ext4_io_end_t *cur_aio_dio;
- atomic_t i_ioend_count; /* Number of outstanding io_end structs */
+
+ spinlock_t i_block_reservation_lock;
/*
* Transactions that contain inode's metadata needed to complete
@@ -917,11 +908,20 @@ struct ext4_inode_info {
#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
#define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */
-#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
-#define set_opt(o, opt) o |= EXT4_MOUNT_##opt
+#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
+ ~EXT4_MOUNT_##opt
+#define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \
+ EXT4_MOUNT_##opt
#define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \
EXT4_MOUNT_##opt)
+#define clear_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 &= \
+ ~EXT4_MOUNT2_##opt
+#define set_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 |= \
+ EXT4_MOUNT2_##opt
+#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
+ EXT4_MOUNT2_##opt)
+
#define ext4_set_bit ext2_set_bit
#define ext4_set_bit_atomic ext2_set_bit_atomic
#define ext4_clear_bit ext2_clear_bit
@@ -1087,6 +1087,7 @@ struct ext4_sb_info {
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
struct buffer_head **s_group_desc;
unsigned int s_mount_opt;
+ unsigned int s_mount_opt2;
unsigned int s_mount_flags;
ext4_fsblk_t s_sb_block;
uid_t s_resuid;
@@ -1237,24 +1238,39 @@ enum {
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
EXT4_STATE_NEWENTRY, /* File just added to dir */
+ EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */
};
-#define EXT4_INODE_BIT_FNS(name, field) \
+#define EXT4_INODE_BIT_FNS(name, field, offset) \
static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
{ \
- return test_bit(bit, &EXT4_I(inode)->i_##field); \
+ return test_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
} \
static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
{ \
- set_bit(bit, &EXT4_I(inode)->i_##field); \
+ set_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
} \
static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
{ \
- clear_bit(bit, &EXT4_I(inode)->i_##field); \
+ clear_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
}
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
+EXT4_INODE_BIT_FNS(flag, flags, 0)
+#if (BITS_PER_LONG < 64)
+EXT4_INODE_BIT_FNS(state, state_flags, 0)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+ (ei)->i_state_flags = 0;
+}
+#else
+EXT4_INODE_BIT_FNS(state, flags, 32)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+ /* We depend on the fact that callers will set i_flags */
+}
+#endif
#else
/* Assume that user mode programs are passing in an ext4fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
@@ -1642,10 +1658,12 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+ struct file *,
struct ext4_dir_entry_2 *,
struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, de, bh, offset) \
- __ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
+#define ext4_check_dir_entry(dir, filp, de, bh, offset) \
+ unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
+ (de), (bh), (offset)))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent);
@@ -1653,6 +1671,7 @@ extern void ext4_htree_free_dir_info(struct dir_private_info *p);
/* fsync.c */
extern int ext4_sync_file(struct file *, int);
+extern int ext4_flush_completed_IO(struct inode *);
/* hash.c */
extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1752,8 +1771,8 @@ extern void ext4_error_inode(struct inode *, const char *, unsigned int,
ext4_fsblk_t, const char *, ...)
__attribute__ ((format (printf, 5, 6)));
extern void ext4_error_file(struct file *, const char *, unsigned int,
- const char *, ...)
- __attribute__ ((format (printf, 4, 5)));
+ ext4_fsblk_t, const char *, ...)
+ __attribute__ ((format (printf, 5, 6)));
extern void __ext4_std_error(struct super_block *, const char *,
unsigned int, int);
extern void __ext4_abort(struct super_block *, const char *, unsigned int,
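The EXT4_INODE_BIT_FNS() rework above is a memory optimization: on 64-bit builds the dynamic state bits live in the upper half of the same unsigned long as i_flags (offset 32), dropping i_state_flags and saving a word per in-memory inode, while 32-bit builds keep the separate field. A self-contained sketch of the packing, assuming kernel bitops:

#include <linux/bitops.h>

/* Two logical 32-bit flag sets packed into one 64-bit word:
 * bits 0-31 hold the persistent flags, bits 32-63 the dynamic state. */
struct packed_flags {
        unsigned long word;
};

static inline void set_state(struct packed_flags *p, int bit)
{
        set_bit(bit + 32, &p->word);    /* atomic, like the real helpers */
}

static inline int test_state(struct packed_flags *p, int bit)
{
        return test_bit(bit + 32, &p->word);
}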
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 28ce70fd9cd..2e29abb30f7 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@ struct ext4_ext_path {
* structure for external API
*/
-#define EXT4_EXT_CACHE_NO 0
-#define EXT4_EXT_CACHE_GAP 1
-#define EXT4_EXT_CACHE_EXTENT 2
-
/*
* to be called by ext4_ext_walk_space()
* negative retcode - error
@@ -197,7 +193,7 @@ static inline unsigned short ext_depth(struct inode *inode)
static inline void
ext4_ext_invalidate_cache(struct inode *inode)
{
- EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+ EXT4_I(inode)->i_cached_extent.ec_len = 0;
}
static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
@@ -278,7 +274,7 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
}
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
- sector_t lblocks);
+ ext4_lblk_t lblocks);
extern int ext4_extent_tree_init(handle_t *, struct inode *);
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b0bd792c58c..d8b992e658c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -253,7 +253,7 @@ static inline int ext4_journal_force_commit(journal_t *journal)
static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+ return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
return 0;
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0554c48cb1f..e910720e8bb 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -117,11 +117,33 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_extent *ex;
depth = path->p_depth;
- /* try to predict block placement */
+ /*
+ * Try to predict block placement assuming that we are
+ * filling in a file which will eventually be
+ * non-sparse --- i.e., in the case of libbfd writing
+ * an ELF object's sections out of order but in a way
+ * that eventually results in a contiguous object or
+ * executable file, or some database extending a table
+ * space file. However, this is actually somewhat
+ * non-ideal if we are writing a sparse file such as
+ * qemu or KVM writing a raw image file that is going
+ * to stay fairly sparse, since it will end up
+ * fragmenting the file system's free space. Maybe we
+ * should have some heuristics or some way to allow
+ * userspace to pass a hint to the file system,
+ * especially if the latter case turns out to be
+ * common.
+ */
ex = path[depth].p_ext;
- if (ex)
- return (ext4_ext_pblock(ex) +
- (block - le32_to_cpu(ex->ee_block)));
+ if (ex) {
+ ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
+ ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
+
+ if (block > ext_block)
+ return ext_pblk + (block - ext_block);
+ else
+ return ext_pblk - (ext_block - block);
+ }
/* it looks like index is empty;
* try to find starting block from index itself */
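The ext4_ext_find_goal() hunk above also fixes unsigned wraparound: ext4_lblk_t is an unsigned 32-bit type, so when the target block lies before the cached extent, block - ext_block wraps to a huge value instead of going negative. A worked example with illustrative numbers:

/* Illustrative values: block = 10, ext_block = 100, ext_pblk = 5000.
 * old: 5000 + (10 - 100)   -> (10 - 100) wraps to 4294967206 (u32),
 *                             producing a nonsense goal
 * new: 5000 - (100 - 10)   -> 4910, a goal just before the extent
 */
if (block > ext_block)
        goal = ext_pblk + (block - ext_block);
else
        goal = ext_pblk - (ext_block - block);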
@@ -244,7 +266,7 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
* to allocate @blocks
* Worse case is one block per extent
*/
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
int idxs, num = 0;
@@ -1872,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
cbex.ec_block = start;
cbex.ec_len = end - start;
cbex.ec_start = 0;
- cbex.ec_type = EXT4_EXT_CACHE_GAP;
} else {
cbex.ec_block = le32_to_cpu(ex->ee_block);
cbex.ec_len = ext4_ext_get_actual_len(ex);
cbex.ec_start = ext4_ext_pblock(ex);
- cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
}
if (unlikely(cbex.ec_len == 0)) {
@@ -1917,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
- __u32 len, ext4_fsblk_t start, int type)
+ __u32 len, ext4_fsblk_t start)
{
struct ext4_ext_cache *cex;
BUG_ON(len == 0);
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
cex = &EXT4_I(inode)->i_cached_extent;
- cex->ec_type = type;
cex->ec_block = block;
cex->ec_len = len;
cex->ec_start = start;
@@ -1976,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
}
ext_debug(" -> %u:%lu\n", lblock, len);
- ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+ ext4_ext_put_in_cache(inode, lblock, len, 0);
}
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
struct ext4_extent *ex)
{
struct ext4_ext_cache *cex;
- int ret = EXT4_EXT_CACHE_NO;
+ int ret = 0;
/*
* We borrow i_block_reservation_lock to protect i_cached_extent
@@ -1993,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
cex = &EXT4_I(inode)->i_cached_extent;
/* has cache valid data? */
- if (cex->ec_type == EXT4_EXT_CACHE_NO)
+ if (cex->ec_len == 0)
goto errout;
- BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
- cex->ec_type != EXT4_EXT_CACHE_EXTENT);
if (in_range(block, cex->ec_block, cex->ec_len)) {
ex->ee_block = cpu_to_le32(cex->ec_block);
ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2005,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
ext_debug("%u cached by %u:%u:%llu\n",
block,
cex->ec_block, cex->ec_len, cex->ec_start);
- ret = cex->ec_type;
+ ret = 1;
}
errout:
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -3082,7 +3102,7 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
* Handle EOFBLOCKS_FL flag, clearing it if necessary
*/
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
- struct ext4_map_blocks *map,
+ ext4_lblk_t lblk,
struct ext4_ext_path *path,
unsigned int len)
{
@@ -3112,7 +3132,7 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
* this turns out to be false, we can bail out from this
* function immediately.
*/
- if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+ if (lblk + len < le32_to_cpu(last_ex->ee_block) +
ext4_ext_get_actual_len(last_ex))
return 0;
/*
@@ -3168,8 +3188,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
path);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
- err = check_eofblocks_fl(handle, inode, map, path,
- map->m_len);
+ err = check_eofblocks_fl(handle, inode, map->m_lblk,
+ path, map->m_len);
} else
err = ret;
goto out2;
@@ -3199,7 +3219,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
- err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
+ map->m_len);
if (err < 0)
goto out2;
}
@@ -3276,7 +3297,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent_header *eh;
struct ext4_extent newex, *ex;
ext4_fsblk_t newblock;
- int err = 0, depth, ret, cache_type;
+ int err = 0, depth, ret;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3285,9 +3306,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_lblk, map->m_len, inode->i_ino);
/* check in cache */
- cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
- if (cache_type) {
- if (cache_type == EXT4_EXT_CACHE_GAP) {
+ if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+ if (!newex.ee_start_lo && !newex.ee_start_hi) {
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* block isn't allocated yet and
@@ -3296,7 +3316,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out2;
}
/* we should allocate requested block */
- } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+ } else {
/* block is already allocated */
newblock = map->m_lblk
- le32_to_cpu(newex.ee_block)
@@ -3305,8 +3325,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
allocated = ext4_ext_get_actual_len(&newex) -
(map->m_lblk - le32_to_cpu(newex.ee_block));
goto out;
- } else {
- BUG();
}
}
@@ -3357,8 +3375,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
/* Do not put uninitialized extent in the cache */
if (!ext4_ext_is_uninitialized(ex)) {
ext4_ext_put_in_cache(inode, ee_block,
- ee_len, ee_start,
- EXT4_EXT_CACHE_EXTENT);
+ ee_len, ee_start);
goto out;
}
ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3456,7 +3473,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_flags |= EXT4_MAP_UNINIT;
}
- err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
if (err)
goto out2;
@@ -3490,8 +3507,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* when it is _not_ an uninitialized extent.
*/
if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
- ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
- EXT4_EXT_CACHE_EXTENT);
+ ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
ext4_update_inode_fsync_trans(handle, inode, 1);
} else
ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3519,6 +3535,12 @@ void ext4_ext_truncate(struct inode *inode)
int err = 0;
/*
+ * finish any pending end_io work so we won't run the risk of
+ * converting any truncated blocks to initialized later
+ */
+ ext4_flush_completed_IO(inode);
+
+ /*
* probably first extent we're gonna free will be last in block
*/
err = ext4_writepage_trans_blocks(inode);
@@ -3767,7 +3789,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
logical = (__u64)newex->ec_block << blksize_bits;
- if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+ if (newex->ec_start == 0) {
pgoff_t offset;
struct page *page;
struct buffer_head *bh = NULL;
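With ec_type removed, the extent-cache state is now encoded entirely in the remaining fields, matching the comment added to ext4.h earlier in this patch: ec_len == 0 means the cache is invalid, and a valid entry whose ec_start is 0 represents a gap (hole). A hypothetical decoder, only to make the encoding explicit:

enum cache_kind { CACHE_INVALID, CACHE_GAP, CACHE_EXTENT };

static enum cache_kind decode_cache(const struct ext4_ext_cache *cex)
{
        if (cex->ec_len == 0)
                return CACHE_INVALID;           /* nothing cached */
        return cex->ec_start ? CACHE_EXTENT     /* mapped blocks */
                             : CACHE_GAP;       /* cached hole */
}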
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5a5c55ddcee..bb003dc9fff 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -104,6 +104,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
{
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
@@ -127,6 +128,27 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
ext4_mark_super_dirty(sb);
}
}
+ /*
+ * Set up the jbd2_inode if we are opening the inode for
+ * writing and the journal is present
+ */
+ if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
+ struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
+
+ spin_lock(&inode->i_lock);
+ if (!ei->jinode) {
+ if (!jinode) {
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+ ei->jinode = jinode;
+ jbd2_journal_init_jbd_inode(ei->jinode, inode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ if (unlikely(jinode != NULL))
+ jbd2_free_inode(jinode);
+ }
return dquot_file_open(inode, filp);
}
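The ext4_file_open() hunk above is the optimistic-allocation pattern: allocate the jbd2_inode before taking inode->i_lock (the allocation may sleep), publish it under the lock, and free the spare copy if another opener won the race. A generic sketch, with struct thing and alloc_thing()/free_thing() as hypothetical stand-ins:

static int publish_once(struct inode *inode, struct thing **slot)
{
        struct thing *new = NULL;

        if (*slot)
                return 0;                       /* already set up */
        new = alloc_thing(GFP_KERNEL);          /* hypothetical; may sleep */

        spin_lock(&inode->i_lock);
        if (!*slot) {
                if (!new) {
                        spin_unlock(&inode->i_lock);
                        return -ENOMEM;
                }
                *slot = new;                    /* we won the race */
                new = NULL;
        }
        spin_unlock(&inode->i_lock);
        if (new)
                free_thing(new);                /* lost the race; drop spare */
        return 0;
}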
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c1a7bc923cf..7829b287822 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -75,7 +75,7 @@ static void dump_completed_IO(struct inode * inode)
* to written.
 * The function returns the number of pending IOs on success.
*/
-static int flush_completed_IO(struct inode *inode)
+extern int ext4_flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -169,7 +169,7 @@ int ext4_sync_file(struct file *file, int datasync)
if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
- ret = flush_completed_IO(inode);
+ ret = ext4_flush_completed_IO(inode);
if (ret < 0)
return ret;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1ce240a23eb..eb9097aec6f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,7 +1027,7 @@ got:
inode->i_generation = sbi->s_next_generation++;
spin_unlock(&sbi->s_next_gen_lock);
- ei->i_state_flags = 0;
+ ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e659597b690..e80fc513eac 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -40,6 +40,7 @@
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -54,10 +55,17 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
trace_ext4_begin_ordered_truncate(inode, new_size);
- return jbd2_journal_begin_ordered_truncate(
- EXT4_SB(inode->i_sb)->s_journal,
- &EXT4_I(inode)->jinode,
- new_size);
+ /*
+ * If jinode is zero, then we never opened the file for
+ * writing, so there's no need to call
+ * jbd2_journal_begin_ordered_truncate() since there's no
+ * outstanding writes we need to flush.
+ */
+ if (!EXT4_I(inode)->jinode)
+ return 0;
+ return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+ EXT4_I(inode)->jinode,
+ new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -552,7 +560,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
}
/**
- * ext4_blks_to_allocate: Look up the block map and count the number
+ * ext4_blks_to_allocate - Look up the block map and count the number
* of direct blocks need to be allocated for the given branch.
*
* @branch: chain of indirect blocks
@@ -591,13 +599,19 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
/**
 * ext4_alloc_blocks: allocate multiple blocks needed for a branch
+ * @handle: handle for this transaction
+ * @inode: inode which needs allocated blocks
+ * @iblock: the logical block to start allocating at
+ * @goal: preferred physical block of allocation
* @indirect_blks: the number of blocks need to allocate for indirect
* blocks
- *
+ * @blks: number of desired blocks
* @new_blocks: on return it will store the new block numbers for
* the indirect blocks(if needed) and the first direct block,
- * @blks: on return it will store the total number of allocated
- * direct blocks
+ * @err: on return it will store the error code
+ *
+ * This function will return the number of blocks allocated as
+ * requested by the passed-in parameters.
*/
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, ext4_fsblk_t goal,
@@ -711,9 +725,11 @@ failed_out:
/**
* ext4_alloc_branch - allocate and set up a chain of blocks.
+ * @handle: handle for this transaction
* @inode: owner
* @indirect_blks: number of allocated indirect blocks
* @blks: number of allocated direct blocks
+ * @goal: preferred place for allocation
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
@@ -826,6 +842,7 @@ failed:
/**
* ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
* @inode: owner
* @block: (logical) number of block we are adding
* @chain: chain of indirect blocks (with a missing link - see
@@ -1081,7 +1098,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
* Calculate the number of metadata blocks need to reserve
* to allocate a block located at @lblock
*/
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return ext4_ext_calc_metadata_amount(inode, lblock);
@@ -1320,7 +1337,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
* avoid double accounting
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
- EXT4_I(inode)->i_delalloc_reserved_flag = 1;
+ ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
@@ -1350,7 +1367,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
ext4_da_update_reserve_space(inode, retval, 1);
}
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
- EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+ ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@ -1878,7 +1895,7 @@ static int ext4_journalled_write_end(struct file *file,
/*
* Reserve a single block located at lblock
*/
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2239,7 +2256,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
* affects functions in many different parts of the allocation
* call path. This flag exists primarily because we don't
* want to change *many* call functions, so ext4_map_blocks()
- * will set the magic i_delalloc_reserved_flag once the
+ * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
* inode's allocation semaphore is taken.
*
* If the blocks in questions were delalloc blocks, set
@@ -3720,8 +3737,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
retry:
io_end = ext4_init_io_end(inode, GFP_ATOMIC);
if (!io_end) {
- if (printk_ratelimit())
- printk(KERN_WARNING "%s: allocation fail\n", __func__);
+ pr_warning_ratelimited("%s: allocation fail\n", __func__);
schedule();
goto retry;
}
@@ -4045,7 +4061,7 @@ int ext4_block_truncate_page(handle_t *handle,
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
- if (ext4_should_order_data(inode))
+ if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
err = ext4_jbd2_file_inode(handle, inode);
mark_buffer_dirty(bh);
}
@@ -4169,6 +4185,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
{
__le32 *p;
int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+ int err;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA;
@@ -4184,11 +4201,23 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, inode, bh);
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (unlikely(err)) {
+ ext4_std_error(inode->i_sb, err);
+ return 1;
+ }
+ }
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err)) {
+ ext4_std_error(inode->i_sb, err);
+ return 1;
+ }
+ err = ext4_truncate_restart_trans(handle, inode,
+ blocks_for_truncate(inode));
+ if (unlikely(err)) {
+ ext4_std_error(inode->i_sb, err);
+ return 1;
}
- ext4_mark_inode_dirty(handle, inode);
- ext4_truncate_restart_trans(handle, inode,
- blocks_for_truncate(inode));
if (bh) {
BUFFER_TRACE(bh, "retaking write access");
ext4_journal_get_write_access(handle, bh);
@@ -4349,6 +4378,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
(__le32 *) bh->b_data,
(__le32 *) bh->b_data + addr_per_block,
depth);
+ brelse(bh);
/*
 * Everything below this pointer has been
@@ -4859,7 +4889,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
- ei->i_state_flags = 0;
+ ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
@@ -5118,7 +5148,7 @@ static int ext4_do_update_inode(handle_t *handle,
if (ext4_inode_blocks_set(handle, raw_inode, ei))
goto out_brelse;
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
- raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+ raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
raw_inode->i_file_acl_high =
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5b4d4e3a4d5..851f49b2f9d 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2608,18 +2608,12 @@ int ext4_mb_release(struct super_block *sb)
static inline int ext4_issue_discard(struct super_block *sb,
ext4_group_t block_group, ext4_grpblk_t block, int count)
{
- int ret;
ext4_fsblk_t discard_block;
discard_block = block + ext4_group_first_block_no(sb, block_group);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
- ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
- if (ret == -EOPNOTSUPP) {
- ext4_warning(sb, "discard not supported, disabling");
- clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
- }
- return ret;
+ return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}
/*
@@ -2631,7 +2625,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
struct super_block *sb = journal->j_private;
struct ext4_buddy e4b;
struct ext4_group_info *db;
- int err, count = 0, count2 = 0;
+ int err, ret, count = 0, count2 = 0;
struct ext4_free_data *entry;
struct list_head *l, *ltmp;
@@ -2641,9 +2635,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
- if (test_opt(sb, DISCARD))
- ext4_issue_discard(sb, entry->group,
+ if (test_opt(sb, DISCARD)) {
+ ret = ext4_issue_discard(sb, entry->group,
entry->start_blk, entry->count);
+ if (unlikely(ret == -EOPNOTSUPP)) {
+ ext4_warning(sb, "discard not supported, "
+ "disabling");
+ clear_opt(sb, DISCARD);
+ }
+ }
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
@@ -3881,19 +3881,6 @@ repeat:
}
}
-/*
- * finds all preallocated spaces and return blocks being freed to them
- * if preallocated space becomes full (no block is used from the space)
- * then the function frees space in buddy
- * XXX: at the moment, truncate (which is the only way to free blocks)
- * discards all preallocations
- */
-static void ext4_mb_return_to_preallocation(struct inode *inode,
- struct ext4_buddy *e4b,
- sector_t block, int count)
-{
- BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
-}
#ifdef CONFIG_EXT4_DEBUG
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
@@ -4283,7 +4270,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
* EDQUOT check, as blocks and quotas have been already
* reserved when data being copied into pagecache.
*/
- if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+ if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
ar->flags |= EXT4_MB_DELALLOC_RESERVED;
else {
/* Without delayed allocation we need to verify
@@ -4380,7 +4367,8 @@ out:
if (inquota && ar->len < inquota)
dquot_free_block(ar->inode, inquota - ar->len);
if (!ar->len) {
- if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+ if (!ext4_test_inode_state(ar->inode,
+ EXT4_STATE_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
reserv_blks);
@@ -4626,7 +4614,11 @@ do_more:
* blocks being freed are metadata. these blocks shouldn't
* be used until this transaction is committed
*/
- new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ if (!new_entry) {
+ err = -ENOMEM;
+ goto error_return;
+ }
new_entry->start_blk = bit;
new_entry->group = block_group;
new_entry->count = count;
@@ -4643,7 +4635,6 @@ do_more:
ext4_lock_group(sb, block_group);
mb_clear_bits(bitmap_bh->b_data, bit, count);
mb_free_blocks(inode, &e4b, bit, count);
- ext4_mb_return_to_preallocation(inode, &e4b, block, count);
}
ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4718,8 +4709,6 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
ext4_unlock_group(sb, group);
ret = ext4_issue_discard(sb, group, start, count);
- if (ret)
- ext4_std_error(sb, ret);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
@@ -4819,6 +4808,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
ext4_group_t group, ngroups = ext4_get_groups_count(sb);
ext4_grpblk_t cnt = 0, first_block, last_block;
uint64_t start, len, minlen, trimmed;
+ ext4_fsblk_t first_data_blk =
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
int ret = 0;
start = range->start >> sb->s_blocksize_bits;
@@ -4828,6 +4819,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
return -EINVAL;
+ if (start < first_data_blk) {
+ len -= first_data_blk - start;
+ start = first_data_blk;
+ }
/* Determine first and last group to examine based on start and len */
ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
@@ -4851,7 +4846,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (len >= EXT4_BLOCKS_PER_GROUP(sb))
len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
else
- last_block = len;
+ last_block = first_block + len;
if (e4b.bd_info->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, &e4b, first_block,
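Two range fixes in ext4_trim_fs() above: the trim start is clamped to the filesystem's first data block, and last_block, which is an offset within the group, must be first_block + len for a range ending inside the group, not len alone. Illustrative numbers:

/* Illustrative: first_block = 100, len = 4096 inside one group.
 * old: last_block = 4096         -> trims group blocks 100..4095,
 *                                    100 blocks short of the request
 * new: last_block = 100 + 4096   -> trims group blocks 100..4195,
 *                                    exactly the range asked for
 */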
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 25f3a974b72..b0a126f23c2 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -496,7 +496,7 @@ int ext4_ext_migrate(struct inode *inode)
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
- S_IFREG, 0, goal);
+ S_IFREG, NULL, goal);
if (IS_ERR(tmp_inode)) {
retval = -ENOMEM;
ext4_journal_stop(handle);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index dc40e75cba8..5485390d32c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -581,9 +581,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
- if (!ext4_check_dir_entry(dir, de, bh,
- (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
- +((char *)de - bh->b_data))) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ + ((char *)de - bh->b_data))) {
/* On error, skip the f_pos to the next block. */
dir_file->f_pos = (dir_file->f_pos |
(dir->i_sb->s_blocksize - 1)) + 1;
@@ -820,7 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh,
if ((char *) de + namelen <= dlimit &&
ext4_match (namelen, name, de)) {
/* found a match - just to be sure, do a full check */
- if (!ext4_check_dir_entry(dir, de, bh, offset))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
return -1;
*res_dir = de;
return 1;
@@ -1036,7 +1036,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
return ERR_PTR(-EIO);
}
inode = ext4_iget(dir->i_sb, ino);
- if (unlikely(IS_ERR(inode))) {
+ if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ESTALE) {
EXT4_ERROR_INODE(dir,
"deleted inode referenced: %u",
@@ -1269,7 +1269,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
de = (struct ext4_dir_entry_2 *)bh->b_data;
top = bh->b_data + blocksize - reclen;
while ((char *) de <= top) {
- if (!ext4_check_dir_entry(dir, de, bh, offset))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
return -EIO;
if (ext4_match(namelen, name, de))
return -EEXIST;
@@ -1602,7 +1602,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (err)
goto journal_error;
}
- ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+ err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+ if (err) {
+ ext4_std_error(inode->i_sb, err);
+ goto cleanup;
+ }
}
de = do_split(handle, dir, &bh, frame, &hinfo, &err);
if (!de)
@@ -1630,17 +1634,21 @@ static int ext4_delete_entry(handle_t *handle,
{
struct ext4_dir_entry_2 *de, *pde;
unsigned int blocksize = dir->i_sb->s_blocksize;
- int i;
+ int i, err;
i = 0;
pde = NULL;
de = (struct ext4_dir_entry_2 *) bh->b_data;
while (i < bh->b_size) {
- if (!ext4_check_dir_entry(dir, de, bh, i))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, i))
return -EIO;
if (de == de_del) {
BUFFER_TRACE(bh, "get_write_access");
- ext4_journal_get_write_access(handle, bh);
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err)) {
+ ext4_std_error(dir->i_sb, err);
+ return err;
+ }
if (pde)
pde->rec_len = ext4_rec_len_to_disk(
ext4_rec_len_from_disk(pde->rec_len,
@@ -1652,7 +1660,11 @@ static int ext4_delete_entry(handle_t *handle,
de->inode = 0;
dir->i_version++;
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, dir, bh);
+ err = ext4_handle_dirty_metadata(handle, dir, bh);
+ if (unlikely(err)) {
+ ext4_std_error(dir->i_sb, err);
+ return err;
+ }
return 0;
}
i += ext4_rec_len_from_disk(de->rec_len, blocksize);
@@ -1789,7 +1801,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
handle_t *handle;
struct inode *inode;
- struct buffer_head *dir_block;
+ struct buffer_head *dir_block = NULL;
struct ext4_dir_entry_2 *de;
unsigned int blocksize = dir->i_sb->s_blocksize;
int err, retries = 0;
@@ -1822,7 +1834,9 @@ retry:
if (!dir_block)
goto out_clear_inode;
BUFFER_TRACE(dir_block, "get_write_access");
- ext4_journal_get_write_access(handle, dir_block);
+ err = ext4_journal_get_write_access(handle, dir_block);
+ if (err)
+ goto out_clear_inode;
de = (struct ext4_dir_entry_2 *) dir_block->b_data;
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
@@ -1839,10 +1853,12 @@ retry:
ext4_set_de_type(dir->i_sb, de, S_IFDIR);
inode->i_nlink = 2;
BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, dir, dir_block);
- brelse(dir_block);
- ext4_mark_inode_dirty(handle, inode);
- err = ext4_add_entry(handle, dentry, inode);
+ err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+ if (err)
+ goto out_clear_inode;
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (!err)
+ err = ext4_add_entry(handle, dentry, inode);
if (err) {
out_clear_inode:
clear_nlink(inode);
@@ -1853,10 +1869,13 @@ out_clear_inode:
}
ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
- ext4_mark_inode_dirty(handle, dir);
+ err = ext4_mark_inode_dirty(handle, dir);
+ if (err)
+ goto out_clear_inode;
d_instantiate(dentry, inode);
unlock_new_inode(inode);
out_stop:
+ brelse(dir_block);
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
@@ -1919,7 +1938,7 @@ static int empty_dir(struct inode *inode)
}
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
- if (!ext4_check_dir_entry(inode, de, bh, offset)) {
+ if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
de = (struct ext4_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2407,7 +2426,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_current_time(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+ retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+ if (unlikely(retval)) {
+ ext4_std_error(new_dir->i_sb, retval);
+ goto end_rename;
+ }
brelse(new_bh);
new_bh = NULL;
}
@@ -2459,7 +2482,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+ retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+ if (retval) {
+ ext4_std_error(old_dir->i_sb, retval);
+ goto end_rename;
+ }
ext4_dec_count(handle, old_dir);
if (new_inode) {
/* checked empty_dir above, can't have another parent,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index beacce11ac5..7270dcfca92 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -44,7 +44,7 @@ int __init ext4_init_pageio(void)
if (io_page_cachep == NULL)
return -ENOMEM;
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
- if (io_page_cachep == NULL) {
+ if (io_end_cachep == NULL) {
kmem_cache_destroy(io_page_cachep);
return -ENOMEM;
}
@@ -158,11 +158,8 @@ static void ext4_end_io_work(struct work_struct *work)
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
- ext4_io_end_t *io = NULL;
-
- io = kmem_cache_alloc(io_end_cachep, flags);
+ ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
if (io) {
- memset(io, 0, sizeof(*io));
atomic_inc(&EXT4_I(inode)->i_ioend_count);
io->inode = inode;
INIT_WORK(&io->work, ext4_end_io_work);
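Two independent fixes in page-io.c above: the init path now tests io_end_cachep, the cache it just tried to create, instead of re-testing io_page_cachep; and ext4_init_io_end() switches to kmem_cache_zalloc(), which allocates and zeroes in one call. The zalloc form is equivalent to:

ext4_io_end_t *io = kmem_cache_alloc(io_end_cachep, flags);
if (io)
        memset(io, 0, sizeof(*io));     /* zalloc does this implicitly */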
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 981c8477ada..3ecc6e45d2f 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -220,7 +220,11 @@ static int setup_new_group_blocks(struct super_block *sb,
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
unlock_buffer(gdb);
- ext4_handle_dirty_metadata(handle, NULL, gdb);
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+ if (unlikely(err)) {
+ brelse(gdb);
+ goto exit_bh;
+ }
ext4_set_bit(bit, bh->b_data);
brelse(gdb);
}
@@ -258,7 +262,11 @@ static int setup_new_group_blocks(struct super_block *sb,
ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
bh->b_data);
- ext4_handle_dirty_metadata(handle, NULL, bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (unlikely(err)) {
+ ext4_std_error(sb, err);
+ goto exit_bh;
+ }
brelse(bh);
/* Mark unused entries in inode bitmap used */
ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@@ -270,7 +278,9 @@ static int setup_new_group_blocks(struct super_block *sb,
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
bh->b_data);
- ext4_handle_dirty_metadata(handle, NULL, bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (unlikely(err))
+ ext4_std_error(sb, err);
exit_bh:
brelse(bh);
@@ -422,17 +432,21 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
goto exit_dind;
}
- if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
+ err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+ if (unlikely(err))
goto exit_dind;
- if ((err = ext4_journal_get_write_access(handle, *primary)))
+ err = ext4_journal_get_write_access(handle, *primary);
+ if (unlikely(err))
goto exit_sbh;
- if ((err = ext4_journal_get_write_access(handle, dind)))
- goto exit_primary;
+ err = ext4_journal_get_write_access(handle, dind);
+ if (unlikely(err))
+ ext4_std_error(sb, err);
/* ext4_reserve_inode_write() gets a reference on the iloc */
- if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (unlikely(err))
goto exit_dindj;
n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@@ -454,12 +468,20 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
* reserved inode, and will become GDT blocks (primary and backup).
*/
data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
- ext4_handle_dirty_metadata(handle, NULL, dind);
- brelse(dind);
+ err = ext4_handle_dirty_metadata(handle, NULL, dind);
+ if (unlikely(err)) {
+ ext4_std_error(sb, err);
+ goto exit_inode;
+ }
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
ext4_mark_iloc_dirty(handle, inode, &iloc);
memset((*primary)->b_data, 0, sb->s_blocksize);
- ext4_handle_dirty_metadata(handle, NULL, *primary);
+ err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+ if (unlikely(err)) {
+ ext4_std_error(sb, err);
+ goto exit_inode;
+ }
+ brelse(dind);
o_group_desc = EXT4_SB(sb)->s_group_desc;
memcpy(n_group_desc, o_group_desc,
@@ -470,19 +492,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
kfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
- ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+ err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+ if (err)
+ ext4_std_error(sb, err);
- return 0;
+ return err;
exit_inode:
/* ext4_journal_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
/* ext4_journal_release_buffer(handle, dind); */
-exit_primary:
- /* ext4_journal_release_buffer(handle, *primary); */
exit_sbh:
- /* ext4_journal_release_buffer(handle, *primary); */
+ /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
brelse(dind);
exit_bh:
@@ -665,7 +687,9 @@ static void update_backups(struct super_block *sb,
memset(bh->b_data + size, 0, rest);
set_buffer_uptodate(bh);
unlock_buffer(bh);
- ext4_handle_dirty_metadata(handle, NULL, bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (unlikely(err))
+ ext4_std_error(sb, err);
brelse(bh);
}
if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -883,7 +907,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
/* Update the global fs size fields */
sbi->s_groups_count++;
- ext4_handle_dirty_metadata(handle, NULL, primary);
+ err = ext4_handle_dirty_metadata(handle, NULL, primary);
+ if (unlikely(err)) {
+ ext4_std_error(sb, err);
+ goto exit_journal;
+ }
/* Update the reserved block counts only once the new group is
* active. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cd37f9d5e44..29c80f6d8b2 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -388,13 +388,14 @@ static void ext4_handle_error(struct super_block *sb)
void __ext4_error(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
- sb->s_id, function, line, current->comm);
- vprintk(fmt, args);
- printk("\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+ sb->s_id, function, line, current->comm, &vaf);
va_end(args);
ext4_handle_error(sb);
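The conversion above collapses the old printk()/vprintk()/printk() triple into a single printk() using the kernel's %pV extension, which expands a nested format string described by struct va_format; emitting the whole message in one call also keeps it from interleaving with concurrent printk output. A minimal sketch of the idiom (the function name is invented for illustration):

#include <linux/kernel.h>

static void log_prefixed(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV consumes the nested fmt/va_list pair in one atomic printk */
	printk(KERN_INFO "%s: %pV\n", prefix, &vaf);
	va_end(args);
}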
@@ -405,28 +406,31 @@ void ext4_error_inode(struct inode *inode, const char *function,
const char *fmt, ...)
{
va_list args;
+ struct va_format vaf;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
es->s_last_error_block = cpu_to_le64(block);
save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
inode->i_sb->s_id, function, line, inode->i_ino);
if (block)
- printk("block %llu: ", block);
- printk("comm %s: ", current->comm);
- vprintk(fmt, args);
- printk("\n");
+ printk(KERN_CONT "block %llu: ", block);
+ printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
va_end(args);
ext4_handle_error(inode->i_sb);
}
void ext4_error_file(struct file *file, const char *function,
- unsigned int line, const char *fmt, ...)
+ unsigned int line, ext4_fsblk_t block,
+ const char *fmt, ...)
{
va_list args;
+ struct va_format vaf;
struct ext4_super_block *es;
struct inode *inode = file->f_dentry->d_inode;
char pathname[80], *path;
@@ -434,17 +438,18 @@ void ext4_error_file(struct file *file, const char *function,
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
save_error_info(inode->i_sb, function, line);
- va_start(args, fmt);
path = d_path(&(file->f_path), pathname, sizeof(pathname));
- if (!path)
+ if (IS_ERR(path))
path = "(unknown)";
printk(KERN_CRIT
- "EXT4-fs error (device %s): %s:%d: inode #%lu "
- "(comm %s path %s): ",
- inode->i_sb->s_id, function, line, inode->i_ino,
- current->comm, path);
- vprintk(fmt, args);
- printk("\n");
+ "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
+ inode->i_sb->s_id, function, line, inode->i_ino);
+ if (block)
+ printk(KERN_CONT "block %llu: ", block);
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
va_end(args);
ext4_handle_error(inode->i_sb);
@@ -543,28 +548,29 @@ void __ext4_abort(struct super_block *sb, const char *function,
panic("EXT4-fs panic from previous error\n");
}
-void ext4_msg (struct super_block * sb, const char *prefix,
- const char *fmt, ...)
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
- vprintk(fmt, args);
- printk("\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
va_end(args);
}
void __ext4_warning(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
- sb->s_id, function, line);
- vprintk(fmt, args);
- printk("\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
+ sb->s_id, function, line, &vaf);
va_end(args);
}
@@ -575,21 +581,25 @@ void __ext4_grp_locked_error(const char *function, unsigned int line,
__releases(bitlock)
__acquires(bitlock)
{
+ struct va_format vaf;
va_list args;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
es->s_last_error_ino = cpu_to_le32(ino);
es->s_last_error_block = cpu_to_le64(block);
__save_error_info(sb, function, line);
+
va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
sb->s_id, function, line, grp);
if (ino)
- printk("inode %lu: ", ino);
+ printk(KERN_CONT "inode %lu: ", ino);
if (block)
- printk("block %llu:", (unsigned long long) block);
- vprintk(fmt, args);
- printk("\n");
+ printk(KERN_CONT "block %llu:", (unsigned long long) block);
+ printk(KERN_CONT "%pV\n", &vaf);
va_end(args);
if (test_opt(sb, ERRORS_CONT)) {
@@ -808,21 +818,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
- /*
- * Note: We can be called before EXT4_SB(sb)->s_journal is set,
- * therefore it can be null here. Don't check it, just initialize
- * jinode.
- */
- jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
- ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
+ ei->jinode = NULL;
INIT_LIST_HEAD(&ei->i_completed_io_list);
spin_lock_init(&ei->i_completed_io_lock);
ei->cur_aio_dio = NULL;
@@ -898,9 +902,12 @@ void ext4_clear_inode(struct inode *inode)
end_writeback(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
- if (EXT4_JOURNAL(inode))
- jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
- &EXT4_I(inode)->jinode);
+ if (EXT4_I(inode)->jinode) {
+ jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
+ EXT4_I(inode)->jinode);
+ jbd2_free_inode(EXT4_I(inode)->jinode);
+ EXT4_I(inode)->jinode = NULL;
+ }
}
static inline void ext4_show_quota_options(struct seq_file *seq,
@@ -1393,7 +1400,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
sbi->s_qf_names[qtype] = NULL;
return 0;
}
- set_opt(sbi->s_mount_opt, QUOTA);
+ set_opt(sb, QUOTA);
return 1;
}
@@ -1448,21 +1455,21 @@ static int parse_options(char *options, struct super_block *sb,
switch (token) {
case Opt_bsd_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
- clear_opt(sbi->s_mount_opt, MINIX_DF);
+ clear_opt(sb, MINIX_DF);
break;
case Opt_minix_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
- set_opt(sbi->s_mount_opt, MINIX_DF);
+ set_opt(sb, MINIX_DF);
break;
case Opt_grpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
- set_opt(sbi->s_mount_opt, GRPID);
+ set_opt(sb, GRPID);
break;
case Opt_nogrpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
- clear_opt(sbi->s_mount_opt, GRPID);
+ clear_opt(sb, GRPID);
break;
case Opt_resuid:
@@ -1480,38 +1487,38 @@ static int parse_options(char *options, struct super_block *sb,
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
- clear_opt(sbi->s_mount_opt, ERRORS_CONT);
- clear_opt(sbi->s_mount_opt, ERRORS_RO);
- set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ clear_opt(sb, ERRORS_CONT);
+ clear_opt(sb, ERRORS_RO);
+ set_opt(sb, ERRORS_PANIC);
break;
case Opt_err_ro:
- clear_opt(sbi->s_mount_opt, ERRORS_CONT);
- clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
- set_opt(sbi->s_mount_opt, ERRORS_RO);
+ clear_opt(sb, ERRORS_CONT);
+ clear_opt(sb, ERRORS_PANIC);
+ set_opt(sb, ERRORS_RO);
break;
case Opt_err_cont:
- clear_opt(sbi->s_mount_opt, ERRORS_RO);
- clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
- set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ clear_opt(sb, ERRORS_RO);
+ clear_opt(sb, ERRORS_PANIC);
+ set_opt(sb, ERRORS_CONT);
break;
case Opt_nouid32:
- set_opt(sbi->s_mount_opt, NO_UID32);
+ set_opt(sb, NO_UID32);
break;
case Opt_debug:
- set_opt(sbi->s_mount_opt, DEBUG);
+ set_opt(sb, DEBUG);
break;
case Opt_oldalloc:
- set_opt(sbi->s_mount_opt, OLDALLOC);
+ set_opt(sb, OLDALLOC);
break;
case Opt_orlov:
- clear_opt(sbi->s_mount_opt, OLDALLOC);
+ clear_opt(sb, OLDALLOC);
break;
#ifdef CONFIG_EXT4_FS_XATTR
case Opt_user_xattr:
- set_opt(sbi->s_mount_opt, XATTR_USER);
+ set_opt(sb, XATTR_USER);
break;
case Opt_nouser_xattr:
- clear_opt(sbi->s_mount_opt, XATTR_USER);
+ clear_opt(sb, XATTR_USER);
break;
#else
case Opt_user_xattr:
@@ -1521,10 +1528,10 @@ static int parse_options(char *options, struct super_block *sb,
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
case Opt_acl:
- set_opt(sbi->s_mount_opt, POSIX_ACL);
+ set_opt(sb, POSIX_ACL);
break;
case Opt_noacl:
- clear_opt(sbi->s_mount_opt, POSIX_ACL);
+ clear_opt(sb, POSIX_ACL);
break;
#else
case Opt_acl:
@@ -1543,7 +1550,7 @@ static int parse_options(char *options, struct super_block *sb,
"Cannot specify journal on remount");
return 0;
}
- set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
+ set_opt(sb, UPDATE_JOURNAL);
break;
case Opt_journal_dev:
if (is_remount) {
@@ -1556,14 +1563,14 @@ static int parse_options(char *options, struct super_block *sb,
*journal_devnum = option;
break;
case Opt_journal_checksum:
- set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+ set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_journal_async_commit:
- set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
- set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+ set_opt(sb, JOURNAL_ASYNC_COMMIT);
+ set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_noload:
- set_opt(sbi->s_mount_opt, NOLOAD);
+ set_opt(sb, NOLOAD);
break;
case Opt_commit:
if (match_int(&args[0], &option))
@@ -1606,15 +1613,15 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
} else {
- clear_opt(sbi->s_mount_opt, DATA_FLAGS);
+ clear_opt(sb, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
case Opt_data_err_abort:
- set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+ set_opt(sb, DATA_ERR_ABORT);
break;
case Opt_data_err_ignore:
- clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+ clear_opt(sb, DATA_ERR_ABORT);
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
@@ -1654,12 +1661,12 @@ set_qf_format:
break;
case Opt_quota:
case Opt_usrquota:
- set_opt(sbi->s_mount_opt, QUOTA);
- set_opt(sbi->s_mount_opt, USRQUOTA);
+ set_opt(sb, QUOTA);
+ set_opt(sb, USRQUOTA);
break;
case Opt_grpquota:
- set_opt(sbi->s_mount_opt, QUOTA);
- set_opt(sbi->s_mount_opt, GRPQUOTA);
+ set_opt(sb, QUOTA);
+ set_opt(sb, GRPQUOTA);
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
@@ -1667,9 +1674,9 @@ set_qf_format:
"options when quota turned on");
return 0;
}
- clear_opt(sbi->s_mount_opt, QUOTA);
- clear_opt(sbi->s_mount_opt, USRQUOTA);
- clear_opt(sbi->s_mount_opt, GRPQUOTA);
+ clear_opt(sb, QUOTA);
+ clear_opt(sb, USRQUOTA);
+ clear_opt(sb, GRPQUOTA);
break;
#else
case Opt_quota:
@@ -1695,7 +1702,7 @@ set_qf_format:
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
break;
case Opt_nobarrier:
- clear_opt(sbi->s_mount_opt, BARRIER);
+ clear_opt(sb, BARRIER);
break;
case Opt_barrier:
if (args[0].from) {
@@ -1704,9 +1711,9 @@ set_qf_format:
} else
option = 1; /* No argument, default to 1 */
if (option)
- set_opt(sbi->s_mount_opt, BARRIER);
+ set_opt(sb, BARRIER);
else
- clear_opt(sbi->s_mount_opt, BARRIER);
+ clear_opt(sb, BARRIER);
break;
case Opt_ignore:
break;
@@ -1730,17 +1737,17 @@ set_qf_format:
"Ignoring deprecated bh option");
break;
case Opt_i_version:
- set_opt(sbi->s_mount_opt, I_VERSION);
+ set_opt(sb, I_VERSION);
sb->s_flags |= MS_I_VERSION;
break;
case Opt_nodelalloc:
- clear_opt(sbi->s_mount_opt, DELALLOC);
+ clear_opt(sb, DELALLOC);
break;
case Opt_mblk_io_submit:
- set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+ set_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_nomblk_io_submit:
- clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+ clear_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_stripe:
if (match_int(&args[0], &option))
@@ -1750,13 +1757,13 @@ set_qf_format:
sbi->s_stripe = option;
break;
case Opt_delalloc:
- set_opt(sbi->s_mount_opt, DELALLOC);
+ set_opt(sb, DELALLOC);
break;
case Opt_block_validity:
- set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+ set_opt(sb, BLOCK_VALIDITY);
break;
case Opt_noblock_validity:
- clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+ clear_opt(sb, BLOCK_VALIDITY);
break;
case Opt_inode_readahead_blks:
if (match_int(&args[0], &option))
@@ -1780,7 +1787,7 @@ set_qf_format:
option);
break;
case Opt_noauto_da_alloc:
- set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+ set_opt(sb, NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
if (args[0].from) {
@@ -1789,24 +1796,24 @@ set_qf_format:
} else
option = 1; /* No argument, default to 1 */
if (option)
- clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
+ clear_opt(sb, NO_AUTO_DA_ALLOC);
else
- set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+ set_opt(sb, NO_AUTO_DA_ALLOC);
break;
case Opt_discard:
- set_opt(sbi->s_mount_opt, DISCARD);
+ set_opt(sb, DISCARD);
break;
case Opt_nodiscard:
- clear_opt(sbi->s_mount_opt, DISCARD);
+ clear_opt(sb, DISCARD);
break;
case Opt_dioread_nolock:
- set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ set_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_dioread_lock:
- clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ clear_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_init_inode_table:
- set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+ set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
@@ -1817,7 +1824,7 @@ set_qf_format:
sbi->s_li_wait_mult = option;
break;
case Opt_noinit_inode_table:
- clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+ clear_opt(sb, INIT_INODE_TABLE);
break;
default:
ext4_msg(sb, KERN_ERR,
@@ -1829,10 +1836,10 @@ set_qf_format:
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
- clear_opt(sbi->s_mount_opt, USRQUOTA);
+ clear_opt(sb, USRQUOTA);
if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
- clear_opt(sbi->s_mount_opt, GRPQUOTA);
+ clear_opt(sb, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
@@ -1902,12 +1909,12 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
ext4_commit_super(sb, 1);
if (test_opt(sb, DEBUG))
printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
- "bpg=%lu, ipg=%lu, mo=%04x]\n",
+ "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
sb->s_blocksize,
sbi->s_groups_count,
EXT4_BLOCKS_PER_GROUP(sb),
EXT4_INODES_PER_GROUP(sb),
- sbi->s_mount_opt);
+ sbi->s_mount_opt, sbi->s_mount_opt2);
return res;
}
@@ -1937,14 +1944,13 @@ static int ext4_fill_flex_info(struct super_block *sb)
size = flex_group_count * sizeof(struct flex_groups);
sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
- sbi->s_flex_groups = vmalloc(size);
- if (sbi->s_flex_groups)
- memset(sbi->s_flex_groups, 0, size);
- }
- if (sbi->s_flex_groups == NULL) {
- ext4_msg(sb, KERN_ERR, "not enough memory for "
- "%u flex groups", flex_group_count);
- goto failed;
+ sbi->s_flex_groups = vzalloc(size);
+ if (sbi->s_flex_groups == NULL) {
+ ext4_msg(sb, KERN_ERR,
+ "not enough memory for %u flex groups",
+ flex_group_count);
+ goto failed;
+ }
}
for (i = 0; i < sbi->s_groups_count; i++) {
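The reshaped allocation above keeps the existing strategy — try kzalloc() for physically contiguous memory first, then fall back to vzalloc() when the flex-group array is too large for the page allocator — while dropping the now-redundant memset() and second NULL check. The general shape of the idiom, sketched with an invented helper name:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_zeroed_large(size_t size)
{
	/* Prefer kmalloc space; very large requests may fail there. */
	void *p = kzalloc(size, GFP_KERNEL);

	if (p == NULL)
		p = vzalloc(size);	/* zeroed, virtually contiguous fallback */
	/* NB: the caller must free with kfree() or vfree() depending on
	 * which allocator succeeded (see is_vmalloc_addr()). */
	return p;
}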
@@ -2923,7 +2929,7 @@ static int ext4_register_li_request(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_li_request *elr;
ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
- int ret;
+ int ret = 0;
if (sbi->s_li_request != NULL)
return 0;
@@ -3078,41 +3084,41 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
- set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+ set_opt(sb, INIT_INODE_TABLE);
if (def_mount_opts & EXT4_DEFM_DEBUG)
- set_opt(sbi->s_mount_opt, DEBUG);
+ set_opt(sb, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
"2.6.38");
- set_opt(sbi->s_mount_opt, GRPID);
+ set_opt(sb, GRPID);
}
if (def_mount_opts & EXT4_DEFM_UID16)
- set_opt(sbi->s_mount_opt, NO_UID32);
+ set_opt(sb, NO_UID32);
#ifdef CONFIG_EXT4_FS_XATTR
if (def_mount_opts & EXT4_DEFM_XATTR_USER)
- set_opt(sbi->s_mount_opt, XATTR_USER);
+ set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (def_mount_opts & EXT4_DEFM_ACL)
- set_opt(sbi->s_mount_opt, POSIX_ACL);
+ set_opt(sb, POSIX_ACL);
#endif
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
- set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+ set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
- set_opt(sbi->s_mount_opt, ORDERED_DATA);
+ set_opt(sb, ORDERED_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
- set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+ set_opt(sb, WRITEBACK_DATA);
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
- set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ set_opt(sb, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
- set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ set_opt(sb, ERRORS_CONT);
else
- set_opt(sbi->s_mount_opt, ERRORS_RO);
+ set_opt(sb, ERRORS_RO);
if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
- set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+ set_opt(sb, BLOCK_VALIDITY);
if (def_mount_opts & EXT4_DEFM_DISCARD)
- set_opt(sbi->s_mount_opt, DISCARD);
+ set_opt(sb, DISCARD);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -3121,7 +3127,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
- set_opt(sbi->s_mount_opt, BARRIER);
+ set_opt(sb, BARRIER);
/*
* enable delayed allocation by default
@@ -3129,7 +3135,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
if (!IS_EXT3_SB(sb) &&
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
- set_opt(sbi->s_mount_opt, DELALLOC);
+ set_opt(sb, DELALLOC);
if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
&journal_devnum, &journal_ioprio, NULL, 0)) {
@@ -3432,8 +3438,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"suppressed and not mounted read-only");
goto failed_mount_wq;
} else {
- clear_opt(sbi->s_mount_opt, DATA_FLAGS);
- set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+ clear_opt(sb, DATA_FLAGS);
+ set_opt(sb, WRITEBACK_DATA);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
@@ -3471,9 +3477,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
- set_opt(sbi->s_mount_opt, ORDERED_DATA);
+ set_opt(sb, ORDERED_DATA);
else
- set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+ set_opt(sb, JOURNAL_DATA);
break;
case EXT4_MOUNT_ORDERED_DATA:
@@ -3563,18 +3569,18 @@ no_journal:
(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
"requested data journaling mode");
- clear_opt(sbi->s_mount_opt, DELALLOC);
+ clear_opt(sb, DELALLOC);
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
"option - requested data journaling mode");
- clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ clear_opt(sb, DIOREAD_NOLOCK);
}
if (sb->s_blocksize < PAGE_SIZE) {
ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
"option - block size is too small");
- clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ clear_opt(sb, DIOREAD_NOLOCK);
}
}
@@ -4173,6 +4179,22 @@ static int ext4_unfreeze(struct super_block *sb)
return 0;
}
+/*
+ * Structure to save mount options for ext4_remount's benefit
+ */
+struct ext4_mount_options {
+ unsigned long s_mount_opt;
+ unsigned long s_mount_opt2;
+ uid_t s_resuid;
+ gid_t s_resgid;
+ unsigned long s_commit_interval;
+ u32 s_min_batch_time, s_max_batch_time;
+#ifdef CONFIG_QUOTA
+ int s_jquota_fmt;
+ char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
struct ext4_super_block *es;
@@ -4193,6 +4215,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
lock_super(sb);
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
+ old_opts.s_mount_opt2 = sbi->s_mount_opt2;
old_opts.s_resuid = sbi->s_resuid;
old_opts.s_resgid = sbi->s_resgid;
old_opts.s_commit_interval = sbi->s_commit_interval;
@@ -4346,6 +4369,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
restore_opts:
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
+ sbi->s_mount_opt2 = old_opts.s_mount_opt2;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sbi->s_commit_interval = old_opts.s_commit_interval;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fa4b899da4b..fc32176eee3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -427,23 +427,23 @@ cleanup:
static int
ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- int i_error, b_error;
+ int ret, ret2;
down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
- i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
- if (i_error < 0) {
- b_error = 0;
- } else {
- if (buffer) {
- buffer += i_error;
- buffer_size -= i_error;
- }
- b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
- if (b_error < 0)
- i_error = 0;
+ ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
+ if (ret < 0)
+ goto errout;
+ if (buffer) {
+ buffer += ret;
+ buffer_size -= ret;
}
+ ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
+ if (ret < 0)
+ goto errout;
+ ret += ret2;
+errout:
up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
- return i_error + b_error;
+ return ret;
}
/*
@@ -947,7 +947,7 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
/*
* ext4_xattr_set_handle()
*
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba95352..9e4686900f1 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/bitops.h>
+#include <linux/ratelimit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@ EXPORT_SYMBOL(jbd2_journal_file_inode);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+EXPORT_SYMBOL(jbd2_inode_cache);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
@@ -827,7 +829,7 @@ static journal_t * journal_init_common (void)
journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
- goto fail;
+ return NULL;
init_waitqueue_head(&journal->j_wait_transaction_locked);
init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +854,12 @@ static journal_t * journal_init_common (void)
err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
if (err) {
kfree(journal);
- goto fail;
+ return NULL;
}
spin_lock_init(&journal->j_history_lock);
return journal;
-fail:
- return NULL;
}
/* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1982,7 +1982,6 @@ static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
static struct journal_head *journal_alloc_journal_head(void)
{
struct journal_head *ret;
- static unsigned long last_warning;
#ifdef CONFIG_JBD2_DEBUG
atomic_inc(&nr_journal_heads);
@@ -1990,11 +1989,7 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
if (!ret) {
jbd_debug(1, "out of memory for journal_head\n");
- if (time_after(jiffies, last_warning + 5*HZ)) {
- printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
- __func__);
- last_warning = jiffies;
- }
+ pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
while (!ret) {
yield();
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
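pr_notice_ratelimited() replaces the hand-rolled last_warning/time_after() bookkeeping with the kernel's shared ratelimit machinery — hence the new <linux/ratelimit.h> include earlier in this file. A minimal sketch of the resulting retry loop, with invented names:

#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/slab.h>

static void *alloc_head_retry(struct kmem_cache *cache)
{
	void *p;

	while ((p = kmem_cache_alloc(cache, GFP_NOFS)) == NULL) {
		/* At most a small burst of messages per ratelimit interval */
		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
		yield();
	}
	return p;
}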
@@ -2292,17 +2287,19 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
#endif
-struct kmem_cache *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
static int __init journal_init_handle_cache(void)
{
- jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
- sizeof(handle_t),
- 0, /* offset */
- SLAB_TEMPORARY, /* flags */
- NULL); /* ctor */
+ jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
if (jbd2_handle_cache == NULL) {
- printk(KERN_EMERG "JBD: failed to create handle cache\n");
+ printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+ return -ENOMEM;
+ }
+ jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+ if (jbd2_inode_cache == NULL) {
+ printk(KERN_EMERG "JBD2: failed to create inode cache\n");
+ kmem_cache_destroy(jbd2_handle_cache);
return -ENOMEM;
}
return 0;
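KMEM_CACHE(struct_name, flags) derives the cache's name, object size, and alignment from the struct type itself, which is why the five-argument kmem_cache_create() call could be dropped above. The handle-cache line expands to roughly:

jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
				      sizeof(struct jbd2_journal_handle),
				      __alignof__(struct jbd2_journal_handle),
				      SLAB_TEMPORARY, NULL);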
@@ -2312,6 +2309,9 @@ static void jbd2_journal_destroy_handle_cache(void)
{
if (jbd2_handle_cache)
kmem_cache_destroy(jbd2_handle_cache);
+ if (jbd2_inode_cache)
+ kmem_cache_destroy(jbd2_inode_cache);
+
}
/*
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f116f..1cad869494f 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@ int jbd2_journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction -
be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
+#endif
journal->j_transaction_sequence = ++info.end_transaction;
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a242613..394893242ae 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -340,9 +340,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
jbd2_free_handle(handle);
current->journal_info = NULL;
handle = ERR_PTR(err);
- goto out;
}
-out:
return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);
@@ -589,7 +587,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
transaction = handle->h_transaction;
journal = transaction->t_journal;
- jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+ jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
JBUFFER_TRACE(jh, "entry");
repeat:
@@ -774,7 +772,7 @@ done:
J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
"Possible IO failure.\n");
page = jh2bh(jh)->b_page;
- offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+ offset = offset_in_page(jh2bh(jh)->b_data);
source = kmap_atomic(page, KM_USER0);
/* Fire data frozen trigger just before we copy the data */
jbd2_buffer_frozen_trigger(jh, source + offset,
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 97f6073ab33..ca58d64374c 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_LOCKD) += lockd.o
-lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
- svcproc.o svcsubs.o mon.o xdr.o grace.o
-lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+ svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
+lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
lockd-objs := $(lockd-objs-y)
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
new file mode 100644
index 00000000000..f848b52c67b
--- /dev/null
+++ b/fs/lockd/clnt4xdr.c
@@ -0,0 +1,605 @@
+/*
+ * linux/fs/lockd/clnt4xdr.c
+ *
+ * XDR functions to encode/decode NLM version 4 RPC arguments and results.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
+# error "NLM host name cannot be larger than NLM's maximum string length!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * a number of 32-bit words
+ */
+#define NLM4_void_sz (0)
+#define NLM4_cookie_sz (1+(NLM_MAXCOOKIELEN>>2))
+#define NLM4_caller_sz (1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_owner_sz (1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_fhandle_sz (1+(NFS3_FHSIZE>>2))
+#define NLM4_lock_sz (5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz)
+#define NLM4_holder_sz (6+NLM4_owner_sz)
+
+#define NLM4_testargs_sz (NLM4_cookie_sz+1+NLM4_lock_sz)
+#define NLM4_lockargs_sz (NLM4_cookie_sz+4+NLM4_lock_sz)
+#define NLM4_cancargs_sz (NLM4_cookie_sz+2+NLM4_lock_sz)
+#define NLM4_unlockargs_sz (NLM4_cookie_sz+NLM4_lock_sz)
+
+#define NLM4_testres_sz (NLM4_cookie_sz+1+NLM4_holder_sz)
+#define NLM4_res_sz (NLM4_cookie_sz+1)
+#define NLM4_norep_sz (0)
+
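These word counts let the RPC layer size its send and receive buffers up front. As a worked example, assuming NLM_MAXCOOKIELEN is 32 bytes: NLM4_cookie_sz is 1 + (32 >> 2) = 9 words, so NLM4_res_sz (a cookie plus one status word) is 10 words, or at most 40 bytes on the wire.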
+
+static s64 loff_t_to_s64(loff_t offset)
+{
+ s64 res;
+
+ if (offset >= NLM4_OFFSET_MAX)
+ res = NLM4_OFFSET_MAX;
+ else if (offset <= -NLM4_OFFSET_MAX)
+ res = -NLM4_OFFSET_MAX;
+ else
+ res = offset;
+ return res;
+}
+
+static void nlm4_compute_offsets(const struct nlm_lock *lock,
+ u64 *l_offset, u64 *l_len)
+{
+ const struct file_lock *fl = &lock->fl;
+
+ BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
+ BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
+ fl->fl_end != OFFSET_MAX);
+
+ *l_offset = loff_t_to_s64(fl->fl_start);
+ if (fl->fl_end == OFFSET_MAX)
+ *l_len = 0;
+ else
+ *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+}
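As a worked example of the clamping and length math above: a POSIX lock from byte 100 to end-of-file arrives with fl_start = 100 and fl_end = OFFSET_MAX and encodes as l_offset = 100, l_len = 0, since NLM uses a zero length to mean "through end of file"; a lock covering bytes 100-199 encodes as l_offset = 100, l_len = 100 (fl_end - fl_start + 1).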
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv4 basic data types
+ *
+ * Basic NLMv4 data types are defined in Appendix II, section 6.1.4
+ * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter
+ * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(value);
+}
+
+/*
+ * typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+ const u8 *data, const unsigned int length)
+{
+ __be32 *p;
+
+ BUG_ON(length > XDR_MAX_NETOBJ);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, data, length);
+}
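An XDR opaque (netobj) is marshalled as a 4-byte big-endian length followed by the data, zero-padded to a 4-byte boundary, which is why xdr_reserve_space() asks for 4 + length here. For example, a 5-byte object {de ad be ef 01} occupies 12 bytes on the wire: 00 00 00 05, the five data bytes, then three bytes of padding.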
+
+static int decode_netobj(struct xdr_stream *xdr,
+ struct xdr_netobj *obj)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ if (unlikely(length > XDR_MAX_NETOBJ))
+ goto out_size;
+ obj->len = length;
+ obj->data = (u8 *)p;
+ return 0;
+out_size:
+ dprintk("NFS: returned netobj was too long: %u\n", length);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+ const struct nlm_cookie *cookie)
+{
+ BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+ encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+ struct nlm_cookie *cookie)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ /* apparently HPUX can return empty cookies */
+ if (length == 0)
+ goto out_hpux;
+ if (length > NLM_MAXCOOKIELEN)
+ goto out_size;
+ p = xdr_inline_decode(xdr, length);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ cookie->len = length;
+ memcpy(cookie->data, p, length);
+ return 0;
+out_hpux:
+ cookie->len = 4;
+ memset(cookie->data, 0, 4);
+ return 0;
+out_size:
+ dprintk("NFS: returned cookie was too long: %u\n", length);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ BUG_ON(fh->size > NFS3_FHSIZE);
+ encode_netobj(xdr, (u8 *)&fh->data, fh->size);
+}
+
+/*
+ * enum nlm4_stats {
+ * NLM4_GRANTED = 0,
+ * NLM4_DENIED = 1,
+ * NLM4_DENIED_NOLOCKS = 2,
+ * NLM4_BLOCKED = 3,
+ * NLM4_DENIED_GRACE_PERIOD = 4,
+ * NLM4_DEADLCK = 5,
+ * NLM4_ROFS = 6,
+ * NLM4_STALE_FH = 7,
+ * NLM4_FBIG = 8,
+ * NLM4_FAILED = 9
+ * };
+ *
+ * struct nlm4_stat {
+ * nlm4_stats stat;
+ * };
+ *
+ * NB: we don't swap bytes for the NLM status values. The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+static void encode_nlm4_stat(struct xdr_stream *xdr,
+ const __be32 stat)
+{
+ __be32 *p;
+
+ BUG_ON(be32_to_cpu(stat) > NLM_FAILED);
+ p = xdr_reserve_space(xdr, 4);
+ *p = stat;
+}
+
+static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (unlikely(*p > nlm4_failed))
+ goto out_bad_xdr;
+ *stat = *p;
+ return 0;
+out_bad_xdr:
+ dprintk("%s: server returned invalid nlm4_stats value: %u\n",
+ __func__, be32_to_cpup(p));
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * struct nlm4_holder {
+ * bool exclusive;
+ * int32 svid;
+ * netobj oh;
+ * uint64 l_offset;
+ * uint64 l_len;
+ * };
+ */
+static void encode_nlm4_holder(struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ const struct nlm_lock *lock = &result->lock;
+ u64 l_offset, l_len;
+ __be32 *p;
+
+ encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+ encode_int32(xdr, lock->svid);
+ encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+ p = xdr_reserve_space(xdr, 4 + 4);
+ nlm4_compute_offsets(lock, &l_offset, &l_len);
+ p = xdr_encode_hyper(p, l_offset);
+ xdr_encode_hyper(p, l_len);
+}
+
+static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+ struct nlm_lock *lock = &result->lock;
+ struct file_lock *fl = &lock->fl;
+ u64 l_offset, l_len;
+ u32 exclusive;
+ int error;
+ __be32 *p;
+ s32 end;
+
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(fl);
+
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ exclusive = be32_to_cpup(p++);
+ lock->svid = be32_to_cpup(p);
+ fl->fl_pid = (pid_t)lock->svid;
+
+ error = decode_netobj(xdr, &lock->oh);
+ if (unlikely(error))
+ goto out;
+
+ p = xdr_inline_decode(xdr, 8 + 8);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ fl->fl_flags = FL_POSIX;
+ fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ p = xdr_decode_hyper(p, &l_offset);
+ xdr_decode_hyper(p, &l_len);
+ end = l_offset + l_len - 1;
+
+ fl->fl_start = (loff_t)l_offset;
+ if (l_len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = (loff_t)end;
+ error = 0;
+out:
+ return error;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+ /* NB: client-side does not set lock->len */
+ u32 length = strlen(name);
+ __be32 *p;
+
+ BUG_ON(length > NLM_MAXSTRLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
+}
+
+/*
+ * struct nlm4_lock {
+ * string caller_name<LM_MAXSTRLEN>;
+ * netobj fh;
+ * netobj oh;
+ * int32 svid;
+ * uint64 l_offset;
+ * uint64 l_len;
+ * };
+ */
+static void encode_nlm4_lock(struct xdr_stream *xdr,
+ const struct nlm_lock *lock)
+{
+ u64 l_offset, l_len;
+ __be32 *p;
+
+ encode_caller_name(xdr, lock->caller);
+ encode_fh(xdr, &lock->fh);
+ encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+ p = xdr_reserve_space(xdr, 4 + 8 + 8);
+ *p++ = cpu_to_be32(lock->svid);
+
+ nlm4_compute_offsets(lock, &l_offset, &l_len);
+ p = xdr_encode_hyper(p, l_offset);
+ xdr_encode_hyper(p, l_len);
+}
+
+
+/*
+ * NLMv4 XDR encode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ * struct nlm4_testargs {
+ * netobj cookie;
+ * bool exclusive;
+ * struct nlm4_lock alock;
+ * };
+ */
+static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ * struct nlm4_lockargs {
+ * netobj cookie;
+ * bool block;
+ * bool exclusive;
+ * struct nlm4_lock alock;
+ * bool reclaim;
+ * int state;
+ * };
+ */
+static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, args->block);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm4_lock(xdr, lock);
+ encode_bool(xdr, args->reclaim);
+ encode_int32(xdr, args->state);
+}
+
+/*
+ * struct nlm4_cancargs {
+ * netobj cookie;
+ * bool block;
+ * bool exclusive;
+ * struct nlm4_lock alock;
+ * };
+ */
+static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, args->block);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ * struct nlm4_unlockargs {
+ * netobj cookie;
+ * struct nlm4_lock alock;
+ * };
+ */
+static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ * struct nlm4_res {
+ * netobj cookie;
+ * nlm4_stat stat;
+ * };
+ */
+static void nlm4_xdr_enc_res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ encode_cookie(xdr, &result->cookie);
+ encode_nlm4_stat(xdr, result->status);
+}
+
+/*
+ * union nlm4_testrply switch (nlm4_stats stat) {
+ * case NLM4_DENIED:
+ * struct nlm4_holder holder;
+ * default:
+ * void;
+ * };
+ *
+ * struct nlm4_testres {
+ * netobj cookie;
+ * nlm4_testrply test_stat;
+ * };
+ */
+static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ encode_cookie(xdr, &result->cookie);
+ encode_nlm4_stat(xdr, result->status);
+ if (result->status == nlm_lck_denied)
+ encode_nlm4_holder(xdr, result);
+}
+
+
+/*
+ * NLMv4 XDR decode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ * union nlm4_testrply switch (nlm4_stats stat) {
+ * case NLM4_DENIED:
+ * struct nlm4_holder holder;
+ * default:
+ * void;
+ * };
+ *
+ * struct nlm4_testres {
+ * netobj cookie;
+ * nlm4_testrply test_stat;
+ * };
+ */
+static int decode_nlm4_testrply(struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_nlm4_stat(xdr, &result->status);
+ if (unlikely(error))
+ goto out;
+ if (result->status == nlm_lck_denied)
+ error = decode_nlm4_holder(xdr, result);
+out:
+ return error;
+}
+
+static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_cookie(xdr, &result->cookie);
+ if (unlikely(error))
+ goto out;
+ error = decode_nlm4_testrply(xdr, result);
+out:
+ return error;
+}
+
+/*
+ * struct nlm4_res {
+ * netobj cookie;
+ * nlm4_stat stat;
+ * };
+ */
+static int nlm4_xdr_dec_res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_cookie(xdr, &result->cookie);
+ if (unlikely(error))
+ goto out;
+ error = decode_nlm4_stat(xdr, &result->status);
+out:
+ return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4_xdr_dec_norep NULL
+
+#define PROC(proc, argtype, restype) \
+[NLMPROC_##proc] = { \
+ .p_proc = NLMPROC_##proc, \
+ .p_encode = (kxdreproc_t)nlm4_xdr_enc_##argtype, \
+ .p_decode = (kxdrdproc_t)nlm4_xdr_dec_##restype, \
+ .p_arglen = NLM4_##argtype##_sz, \
+ .p_replen = NLM4_##restype##_sz, \
+ .p_statidx = NLMPROC_##proc, \
+ .p_name = #proc, \
+ }
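Each PROC() invocation fills the table slot indexed by its procedure number; for instance, PROC(TEST, testargs, testres) expands to:

[NLMPROC_TEST] = {
	.p_proc	   = NLMPROC_TEST,
	.p_encode  = (kxdreproc_t)nlm4_xdr_enc_testargs,
	.p_decode  = (kxdrdproc_t)nlm4_xdr_dec_testres,
	.p_arglen  = NLM4_testargs_sz,
	.p_replen  = NLM4_testres_sz,
	.p_statidx = NLMPROC_TEST,
	.p_name	   = "TEST",
},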
+
+static struct rpc_procinfo nlm4_procedures[] = {
+ PROC(TEST, testargs, testres),
+ PROC(LOCK, lockargs, res),
+ PROC(CANCEL, cancargs, res),
+ PROC(UNLOCK, unlockargs, res),
+ PROC(GRANTED, testargs, res),
+ PROC(TEST_MSG, testargs, norep),
+ PROC(LOCK_MSG, lockargs, norep),
+ PROC(CANCEL_MSG, cancargs, norep),
+ PROC(UNLOCK_MSG, unlockargs, norep),
+ PROC(GRANTED_MSG, testargs, norep),
+ PROC(TEST_RES, testres, norep),
+ PROC(LOCK_RES, res, norep),
+ PROC(CANCEL_RES, res, norep),
+ PROC(UNLOCK_RES, res, norep),
+ PROC(GRANTED_RES, res, norep),
+};
+
+struct rpc_version nlm_version4 = {
+ .number = 4,
+ .nrprocs = ARRAY_SIZE(nlm4_procedures),
+ .procs = nlm4_procedures,
+};
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 25509eb28fd..8d4ea8351e3 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
*/
void nlmclnt_done(struct nlm_host *host)
{
- nlm_release_host(host);
+ nlmclnt_release_host(host);
lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -273,7 +273,7 @@ restart:
spin_unlock(&nlm_blocked_lock);
/* Release host handle after use */
- nlm_release_host(host);
+ nlmclnt_release_host(host);
lockd_down();
return 0;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 332c54cf75e..adb45ec9038 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -58,7 +58,7 @@ static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
return;
list_del(&lockowner->list);
spin_unlock(&lockowner->host->h_lock);
- nlm_release_host(lockowner->host);
+ nlmclnt_release_host(lockowner->host);
kfree(lockowner);
}
@@ -207,22 +207,22 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
printk("nlm_alloc_call: failed, waiting for memory\n");
schedule_timeout_interruptible(5*HZ);
}
- nlm_release_host(host);
+ nlmclnt_release_host(host);
return NULL;
}
-void nlm_release_call(struct nlm_rqst *call)
+void nlmclnt_release_call(struct nlm_rqst *call)
{
if (!atomic_dec_and_test(&call->a_count))
return;
- nlm_release_host(call->a_host);
+ nlmclnt_release_host(call->a_host);
nlmclnt_release_lockargs(call);
kfree(call);
}
static void nlmclnt_rpc_release(void *data)
{
- nlm_release_call(data);
+ nlmclnt_release_call(data);
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -436,7 +436,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
status = nlm_stat_to_errno(req->a_res.status);
}
out:
- nlm_release_call(req);
+ nlmclnt_release_call(req);
return status;
}
@@ -593,7 +593,7 @@ again:
out_unblock:
nlmclnt_finish_block(block);
out:
- nlm_release_call(req);
+ nlmclnt_release_call(req);
return status;
out_unlock:
/* Fatal error: ensure that we remove the lock altogether */
@@ -694,7 +694,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
/* What to do now? I'm out of my depth... */
status = -ENOLCK;
out:
- nlm_release_call(req);
+ nlmclnt_release_call(req);
return status;
}
@@ -755,7 +755,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
status = -ENOLCK;
- nlm_release_call(req);
+ nlmclnt_release_call(req);
return status;
}
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
new file mode 100644
index 00000000000..180ac34feb9
--- /dev/null
+++ b/fs/lockd/clntxdr.c
@@ -0,0 +1,627 @@
+/*
+ * linux/fs/lockd/clntxdr.c
+ *
+ * XDR functions to encode/decode NLM version 3 RPC arguments and results.
+ * NLM version 3 is backwards compatible with NLM versions 1 and 2.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * a number of 32-bit words
+ */
+#define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2))
+#define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2))
+#define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2))
+#define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2))
+#define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz)
+#define NLM_holder_sz (4+NLM_owner_sz)
+
+#define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz)
+#define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz)
+#define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz)
+#define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz)
+
+#define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz)
+#define NLM_res_sz (NLM_cookie_sz+1)
+#define NLM_norep_sz (0)
+
+
+static s32 loff_t_to_s32(loff_t offset)
+{
+ s32 res;
+
+ if (offset >= NLM_OFFSET_MAX)
+ res = NLM_OFFSET_MAX;
+ else if (offset <= -NLM_OFFSET_MAX)
+ res = -NLM_OFFSET_MAX;
+ else
+ res = offset;
+ return res;
+}
+
+static void nlm_compute_offsets(const struct nlm_lock *lock,
+ u32 *l_offset, u32 *l_len)
+{
+ const struct file_lock *fl = &lock->fl;
+
+ BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
+ BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
+ fl->fl_end != OFFSET_MAX);
+
+ *l_offset = loff_t_to_s32(fl->fl_start);
+ if (fl->fl_end == OFFSET_MAX)
+ *l_len = 0;
+ else
+ *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv3 basic data types
+ *
+ * Basic NLMv3 data types are not defined in an IETF standards
+ * document. X/Open has a description of these data types that
+ * is useful. See Chapter 10 of "Protocols for Interworking:
+ * XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(value);
+}
+
+/*
+ * typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+ const u8 *data, const unsigned int length)
+{
+ __be32 *p;
+
+ BUG_ON(length > XDR_MAX_NETOBJ);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+ struct xdr_netobj *obj)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ if (unlikely(length > XDR_MAX_NETOBJ))
+ goto out_size;
+ obj->len = length;
+ obj->data = (u8 *)p;
+ return 0;
+out_size:
+ dprintk("NFS: returned netobj was too long: %u\n", length);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+ const struct nlm_cookie *cookie)
+{
+ BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+ encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+ struct nlm_cookie *cookie)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ /* apparently HPUX can return empty cookies */
+ if (length == 0)
+ goto out_hpux;
+ if (length > NLM_MAXCOOKIELEN)
+ goto out_size;
+ p = xdr_inline_decode(xdr, length);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ cookie->len = length;
+ memcpy(cookie->data, p, length);
+ return 0;
+out_hpux:
+ cookie->len = 4;
+ memset(cookie->data, 0, 4);
+ return 0;
+out_size:
+ dprintk("NFS: returned cookie was too long: %u\n", length);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ BUG_ON(fh->size != NFS2_FHSIZE);
+ encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
+}
+
+/*
+ * enum nlm_stats {
+ * LCK_GRANTED = 0,
+ * LCK_DENIED = 1,
+ * LCK_DENIED_NOLOCKS = 2,
+ * LCK_BLOCKED = 3,
+ * LCK_DENIED_GRACE_PERIOD = 4
+ * };
+ *
+ *
+ * struct nlm_stat {
+ * nlm_stats stat;
+ * };
+ *
+ * NB: we don't swap bytes for the NLM status values. The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+
+static void encode_nlm_stat(struct xdr_stream *xdr,
+ const __be32 stat)
+{
+ __be32 *p;
+
+ BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ p = xdr_reserve_space(xdr, 4);
+ *p = stat;
+}
+
+static int decode_nlm_stat(struct xdr_stream *xdr,
+ __be32 *stat)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (unlikely(*p > nlm_lck_denied_grace_period))
+ goto out_enum;
+ *stat = *p;
+ return 0;
+out_enum:
+ dprintk("%s: server returned invalid nlm_stats value: %u\n",
+ __func__, be32_to_cpup(p));
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * struct nlm_holder {
+ * bool exclusive;
+ * int uppid;
+ * netobj oh;
+ * unsigned l_offset;
+ * unsigned l_len;
+ * };
+ */
+static void encode_nlm_holder(struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ const struct nlm_lock *lock = &result->lock;
+ u32 l_offset, l_len;
+ __be32 *p;
+
+ encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+ encode_int32(xdr, lock->svid);
+ encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+ p = xdr_reserve_space(xdr, 4 + 4);
+ nlm_compute_offsets(lock, &l_offset, &l_len);
+ *p++ = cpu_to_be32(l_offset);
+ *p = cpu_to_be32(l_len);
+}
+
+static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+ struct nlm_lock *lock = &result->lock;
+ struct file_lock *fl = &lock->fl;
+ u32 exclusive, l_offset, l_len;
+ int error;
+ __be32 *p;
+ s32 end;
+
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(fl);
+
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ exclusive = be32_to_cpup(p++);
+ lock->svid = be32_to_cpup(p);
+ fl->fl_pid = (pid_t)lock->svid;
+
+ error = decode_netobj(xdr, &lock->oh);
+ if (unlikely(error))
+ goto out;
+
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ fl->fl_flags = FL_POSIX;
+ fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ l_offset = be32_to_cpup(p++);
+ l_len = be32_to_cpup(p);
+ end = l_offset + l_len - 1;
+
+ fl->fl_start = (loff_t)l_offset;
+ if (l_len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = (loff_t)end;
+ error = 0;
+out:
+ return error;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+ /* NB: client-side does not set lock->len */
+ u32 length = strlen(name);
+ __be32 *p;
+
+ BUG_ON(length > NLM_MAXSTRLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
+}
+
+/*
+ * struct nlm_lock {
+ * string caller_name<LM_MAXSTRLEN>;
+ * netobj fh;
+ * netobj oh;
+ * int uppid;
+ * unsigned l_offset;
+ * unsigned l_len;
+ * };
+ */
+static void encode_nlm_lock(struct xdr_stream *xdr,
+ const struct nlm_lock *lock)
+{
+ u32 l_offset, l_len;
+ __be32 *p;
+
+ encode_caller_name(xdr, lock->caller);
+ encode_fh(xdr, &lock->fh);
+ encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(lock->svid);
+
+ nlm_compute_offsets(lock, &l_offset, &l_len);
+ *p++ = cpu_to_be32(l_offset);
+ *p = cpu_to_be32(l_len);
+}
+
+
+/*
+ * NLMv3 XDR encode functions
+ *
+ * NLMv3 argument types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ * struct nlm_testargs {
+ * netobj cookie;
+ * bool exclusive;
+ * struct nlm_lock alock;
+ * };
+ */
+static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm_lock(xdr, lock);
+}
+
+/*
+ * struct nlm_lockargs {
+ * netobj cookie;
+ * bool block;
+ * bool exclusive;
+ * struct nlm_lock alock;
+ * bool reclaim;
+ * int state;
+ * };
+ */
+static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, args->block);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm_lock(xdr, lock);
+ encode_bool(xdr, args->reclaim);
+ encode_int32(xdr, args->state);
+}
+
+/*
+ * struct nlm_cancargs {
+ * netobj cookie;
+ * bool block;
+ * bool exclusive;
+ * struct nlm_lock alock;
+ * };
+ */
+static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_bool(xdr, args->block);
+ encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+ encode_nlm_lock(xdr, lock);
+}
+
+/*
+ * struct nlm_unlockargs {
+ * netobj cookie;
+ * struct nlm_lock alock;
+ * };
+ */
+static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_args *args)
+{
+ const struct nlm_lock *lock = &args->lock;
+
+ encode_cookie(xdr, &args->cookie);
+ encode_nlm_lock(xdr, lock);
+}
+
+/*
+ * struct nlm_res {
+ * netobj cookie;
+ * nlm_stat stat;
+ * };
+ */
+static void nlm_xdr_enc_res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ encode_cookie(xdr, &result->cookie);
+ encode_nlm_stat(xdr, result->status);
+}
+
+/*
+ * union nlm_testrply switch (nlm_stats stat) {
+ * case LCK_DENIED:
+ * struct nlm_holder holder;
+ * default:
+ * void;
+ * };
+ *
+ * struct nlm_testres {
+ * netobj cookie;
+ * nlm_testrply test_stat;
+ * };
+ */
+static void encode_nlm_testrply(struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ if (result->status == nlm_lck_denied)
+ encode_nlm_holder(xdr, result);
+}
+
+static void nlm_xdr_enc_testres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nlm_res *result)
+{
+ encode_cookie(xdr, &result->cookie);
+ encode_nlm_stat(xdr, result->status);
+ encode_nlm_testrply(xdr, result);
+}
+
+
+/*
+ * NLMv3 XDR decode functions
+ *
+ * NLMv3 result types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ * union nlm_testrply switch (nlm_stats stat) {
+ * case LCK_DENIED:
+ * struct nlm_holder holder;
+ * default:
+ * void;
+ * };
+ *
+ * struct nlm_testres {
+ * netobj cookie;
+ * nlm_testrply test_stat;
+ * };
+ */
+static int decode_nlm_testrply(struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_nlm_stat(xdr, &result->status);
+ if (unlikely(error))
+ goto out;
+ if (result->status == nlm_lck_denied)
+ error = decode_nlm_holder(xdr, result);
+out:
+ return error;
+}
+
+static int nlm_xdr_dec_testres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_cookie(xdr, &result->cookie);
+ if (unlikely(error))
+ goto out;
+ error = decode_nlm_testrply(xdr, result);
+out:
+ return error;
+}
+
+/*
+ * struct nlm_res {
+ * netobj cookie;
+ * nlm_stat stat;
+ * };
+ */
+static int nlm_xdr_dec_res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nlm_res *result)
+{
+ int error;
+
+ error = decode_cookie(xdr, &result->cookie);
+ if (unlikely(error))
+ goto out;
+ error = decode_nlm_stat(xdr, &result->status);
+out:
+ return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm_xdr_dec_norep NULL
+
+#define PROC(proc, argtype, restype) \
+[NLMPROC_##proc] = { \
+ .p_proc = NLMPROC_##proc, \
+ .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \
+ .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \
+ .p_arglen = NLM_##argtype##_sz, \
+ .p_replen = NLM_##restype##_sz, \
+ .p_statidx = NLMPROC_##proc, \
+ .p_name = #proc, \
+ }
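+
+/*
+ * For illustration, PROC(TEST, testargs, testres) expands to:
+ *
+ *	[NLMPROC_TEST] = {
+ *		.p_proc = NLMPROC_TEST,
+ *		.p_encode = (kxdreproc_t)nlm_xdr_enc_testargs,
+ *		.p_decode = (kxdrdproc_t)nlm_xdr_dec_testres,
+ *		.p_arglen = NLM_testargs_sz,
+ *		.p_replen = NLM_testres_sz,
+ *		.p_statidx = NLMPROC_TEST,
+ *		.p_name = "TEST",
+ *	}
+ */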
+
+static struct rpc_procinfo nlm_procedures[] = {
+ PROC(TEST, testargs, testres),
+ PROC(LOCK, lockargs, res),
+ PROC(CANCEL, cancargs, res),
+ PROC(UNLOCK, unlockargs, res),
+ PROC(GRANTED, testargs, res),
+ PROC(TEST_MSG, testargs, norep),
+ PROC(LOCK_MSG, lockargs, norep),
+ PROC(CANCEL_MSG, cancargs, norep),
+ PROC(UNLOCK_MSG, unlockargs, norep),
+ PROC(GRANTED_MSG, testargs, norep),
+ PROC(TEST_RES, testres, norep),
+ PROC(LOCK_RES, res, norep),
+ PROC(CANCEL_RES, res, norep),
+ PROC(UNLOCK_RES, res, norep),
+ PROC(GRANTED_RES, res, norep),
+};
+
+static struct rpc_version nlm_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nlm_procedures),
+ .procs = nlm_procedures,
+};
+
+static struct rpc_version nlm_version3 = {
+ .number = 3,
+ .nrprocs = ARRAY_SIZE(nlm_procedures),
+ .procs = nlm_procedures,
+};
+
+static struct rpc_version *nlm_versions[] = {
+ [1] = &nlm_version1,
+ [3] = &nlm_version3,
+#ifdef CONFIG_LOCKD_V4
+ [4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat nlm_rpc_stats;
+
+struct rpc_program nlm_program = {
+ .name = "lockd",
+ .number = NLM_PROGRAM,
+ .nrvers = ARRAY_SIZE(nlm_versions),
+ .version = nlm_versions,
+ .stats = &nlm_rpc_stats,
+};
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ed0c59fe23c..5f1bcb2f06f 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,9 +25,22 @@
#define NLM_HOST_EXPIRE (300 * HZ)
#define NLM_HOST_COLLECT (120 * HZ)
-static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
+static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+ for ((chain) = (table); \
+ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+ hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+ for ((chain) = (table); \
+ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+ hlist_for_each_entry_safe((host), (pos), (next), \
+ (chain), h_hash)
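+
+/*
+ * Both helpers expand to two nested loops (an outer walk over the hash
+ * chains and an inner hlist walk), so a bare "break" in the loop body
+ * only leaves the inner hlist iteration; callers that need to stop the
+ * whole scan early use "goto" instead (see next_host_state() below).
+ */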
+
static unsigned long next_gc;
-static int nrhosts;
+static unsigned long nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);
static void nlm_gc_hosts(void);
@@ -40,8 +53,6 @@ struct nlm_lookup_host_info {
const u32 version; /* NLM version to search for */
const char *hostname; /* remote's hostname */
const size_t hostname_len; /* its length */
- const struct sockaddr *src_sap; /* our address (optional) */
- const size_t src_len; /* it's length */
const int noresvport; /* use non-priv port */
};
@@ -88,127 +99,83 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap)
}
/*
- * Common host lookup routine for server & client
+ * Allocate and initialize an nlm_host. Common to both client and server.
*/
-static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
+static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+ struct nsm_handle *nsm)
{
- struct hlist_head *chain;
- struct hlist_node *pos;
- struct nlm_host *host;
- struct nsm_handle *nsm = NULL;
-
- mutex_lock(&nlm_host_mutex);
+ struct nlm_host *host = NULL;
+ unsigned long now = jiffies;
- if (time_after_eq(jiffies, next_gc))
- nlm_gc_hosts();
-
- /* We may keep several nlm_host objects for a peer, because each
- * nlm_host is identified by
- * (address, protocol, version, server/client)
- * We could probably simplify this a little by putting all those
- * different NLM rpc_clients into one single nlm_host object.
- * This would allow us to have one nlm_host per address.
- */
- chain = &nlm_hosts[nlm_hash_address(ni->sap)];
- hlist_for_each_entry(host, pos, chain, h_hash) {
- if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
- continue;
-
- /* See if we have an NSM handle for this client */
- if (!nsm)
- nsm = host->h_nsmhandle;
-
- if (host->h_proto != ni->protocol)
- continue;
- if (host->h_version != ni->version)
- continue;
- if (host->h_server != ni->server)
- continue;
- if (ni->server && ni->src_len != 0 &&
- !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
- continue;
-
- /* Move to head of hash chain. */
- hlist_del(&host->h_hash);
- hlist_add_head(&host->h_hash, chain);
-
- nlm_get_host(host);
- dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
- host->h_name, host->h_addrbuf);
- goto out;
- }
-
- /*
- * The host wasn't in our hash table. If we don't
- * have an NSM handle for it yet, create one.
- */
- if (nsm)
+ if (nsm != NULL)
atomic_inc(&nsm->sm_count);
else {
host = NULL;
nsm = nsm_get_handle(ni->sap, ni->salen,
ni->hostname, ni->hostname_len);
- if (!nsm) {
- dprintk("lockd: nlm_lookup_host failed; "
- "no nsm handle\n");
+ if (unlikely(nsm == NULL)) {
+ dprintk("lockd: %s failed; no nsm handle\n",
+ __func__);
goto out;
}
}
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (unlikely(host == NULL)) {
+ dprintk("lockd: %s failed; no memory\n", __func__);
nsm_release(nsm);
- dprintk("lockd: nlm_lookup_host failed; no memory\n");
goto out;
}
- host->h_name = nsm->sm_name;
- host->h_addrbuf = nsm->sm_addrbuf;
+
memcpy(nlm_addr(host), ni->sap, ni->salen);
- host->h_addrlen = ni->salen;
+ host->h_addrlen = ni->salen;
rpc_set_port(nlm_addr(host), 0);
- memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
- host->h_srcaddrlen = ni->src_len;
+ host->h_srcaddrlen = 0;
+
+ host->h_rpcclnt = NULL;
+ host->h_name = nsm->sm_name;
host->h_version = ni->version;
host->h_proto = ni->protocol;
- host->h_rpcclnt = NULL;
- mutex_init(&host->h_mutex);
- host->h_nextrebind = jiffies + NLM_HOST_REBIND;
- host->h_expires = jiffies + NLM_HOST_EXPIRE;
- atomic_set(&host->h_count, 1);
+ host->h_reclaiming = 0;
+ host->h_server = ni->server;
+ host->h_noresvport = ni->noresvport;
+ host->h_inuse = 0;
init_waitqueue_head(&host->h_gracewait);
init_rwsem(&host->h_rwsem);
- host->h_state = 0; /* pseudo NSM state */
- host->h_nsmstate = 0; /* real NSM state */
- host->h_nsmhandle = nsm;
- host->h_server = ni->server;
- host->h_noresvport = ni->noresvport;
- hlist_add_head(&host->h_hash, chain);
+ host->h_state = 0;
+ host->h_nsmstate = 0;
+ host->h_pidcount = 0;
+ atomic_set(&host->h_count, 1);
+ mutex_init(&host->h_mutex);
+ host->h_nextrebind = now + NLM_HOST_REBIND;
+ host->h_expires = now + NLM_HOST_EXPIRE;
INIT_LIST_HEAD(&host->h_lockowners);
spin_lock_init(&host->h_lock);
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
-
- nrhosts++;
-
- dprintk("lockd: nlm_lookup_host created host %s\n",
- host->h_name);
+ host->h_nsmhandle = nsm;
+ host->h_addrbuf = nsm->sm_addrbuf;
out:
- mutex_unlock(&nlm_host_mutex);
return host;
}
/*
- * Destroy a host
+ * Destroy an nlm_host and free associated resources
+ *
+ * Caller must hold nlm_host_mutex.
*/
-static void
-nlm_destroy_host(struct nlm_host *host)
+static void nlm_destroy_host_locked(struct nlm_host *host)
{
struct rpc_clnt *clnt;
+ dprintk("lockd: destroy host %s\n", host->h_name);
+
BUG_ON(!list_empty(&host->h_lockowners));
BUG_ON(atomic_read(&host->h_count));
+ hlist_del_init(&host->h_hash);
+
nsm_unmonitor(host);
nsm_release(host->h_nsmhandle);
@@ -216,6 +183,8 @@ nlm_destroy_host(struct nlm_host *host)
if (clnt != NULL)
rpc_shutdown_client(clnt);
kfree(host);
+
+ nrhosts--;
}
/**
@@ -249,12 +218,76 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
.hostname_len = strlen(hostname),
.noresvport = noresvport,
};
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+ struct nlm_host *host;
+ struct nsm_handle *nsm = NULL;
dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
(hostname ? hostname : "<none>"), version,
(protocol == IPPROTO_UDP ? "udp" : "tcp"));
- return nlm_lookup_host(&ni);
+ mutex_lock(&nlm_host_mutex);
+
+ chain = &nlm_client_hosts[nlm_hash_address(sap)];
+ hlist_for_each_entry(host, pos, chain, h_hash) {
+ if (!rpc_cmp_addr(nlm_addr(host), sap))
+ continue;
+
+ /* Same address. Share an NSM handle if we already have one */
+ if (nsm == NULL)
+ nsm = host->h_nsmhandle;
+
+ if (host->h_proto != protocol)
+ continue;
+ if (host->h_version != version)
+ continue;
+
+ nlm_get_host(host);
+ dprintk("lockd: %s found host %s (%s)\n", __func__,
+ host->h_name, host->h_addrbuf);
+ goto out;
+ }
+
+ host = nlm_alloc_host(&ni, nsm);
+ if (unlikely(host == NULL))
+ goto out;
+
+ hlist_add_head(&host->h_hash, chain);
+ nrhosts++;
+
+ dprintk("lockd: %s created host %s (%s)\n", __func__,
+ host->h_name, host->h_addrbuf);
+
+out:
+ mutex_unlock(&nlm_host_mutex);
+ return host;
+}
+
+/**
+ * nlmclnt_release_host - release client nlm_host
+ * @host: nlm_host to release
+ *
+ */
+void nlmclnt_release_host(struct nlm_host *host)
+{
+ if (host == NULL)
+ return;
+
+ dprintk("lockd: release client host %s\n", host->h_name);
+
+ BUG_ON(atomic_read(&host->h_count) < 0);
+ BUG_ON(host->h_server);
+
+ if (atomic_dec_and_test(&host->h_count)) {
+ BUG_ON(!list_empty(&host->h_lockowners));
+ BUG_ON(!list_empty(&host->h_granted));
+ BUG_ON(!list_empty(&host->h_reclaim));
+
+ mutex_lock(&nlm_host_mutex);
+ nlm_destroy_host_locked(host);
+ mutex_unlock(&nlm_host_mutex);
+ }
}
/**
@@ -279,12 +312,18 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
const size_t hostname_len)
{
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+ struct nlm_host *host = NULL;
+ struct nsm_handle *nsm = NULL;
struct sockaddr_in sin = {
.sin_family = AF_INET,
};
struct sockaddr_in6 sin6 = {
.sin6_family = AF_INET6,
};
+ struct sockaddr *src_sap;
+ size_t src_len = rqstp->rq_addrlen;
struct nlm_lookup_host_info ni = {
.server = 1,
.sap = svc_addr(rqstp),
@@ -293,27 +332,91 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
.version = rqstp->rq_vers,
.hostname = hostname,
.hostname_len = hostname_len,
- .src_len = rqstp->rq_addrlen,
};
dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
+ mutex_lock(&nlm_host_mutex);
+
switch (ni.sap->sa_family) {
case AF_INET:
sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
- ni.src_sap = (struct sockaddr *)&sin;
+ src_sap = (struct sockaddr *)&sin;
break;
case AF_INET6:
ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
- ni.src_sap = (struct sockaddr *)&sin6;
+ src_sap = (struct sockaddr *)&sin6;
break;
default:
- return NULL;
+ dprintk("lockd: %s failed; unrecognized address family\n",
+ __func__);
+ goto out;
+ }
+
+ if (time_after_eq(jiffies, next_gc))
+ nlm_gc_hosts();
+
+ chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
+ hlist_for_each_entry(host, pos, chain, h_hash) {
+ if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
+ continue;
+
+ /* Same address. Share an NSM handle if we already have one */
+ if (nsm == NULL)
+ nsm = host->h_nsmhandle;
+
+ if (host->h_proto != ni.protocol)
+ continue;
+ if (host->h_version != ni.version)
+ continue;
+ if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
+ continue;
+
+ /* Move to head of hash chain. */
+ hlist_del(&host->h_hash);
+ hlist_add_head(&host->h_hash, chain);
+
+ nlm_get_host(host);
+ dprintk("lockd: %s found host %s (%s)\n",
+ __func__, host->h_name, host->h_addrbuf);
+ goto out;
}
- return nlm_lookup_host(&ni);
+ host = nlm_alloc_host(&ni, nsm);
+ if (unlikely(host == NULL))
+ goto out;
+
+ memcpy(nlm_srcaddr(host), src_sap, src_len);
+ host->h_srcaddrlen = src_len;
+ hlist_add_head(&host->h_hash, chain);
+ nrhosts++;
+
+ dprintk("lockd: %s created host %s (%s)\n",
+ __func__, host->h_name, host->h_addrbuf);
+
+out:
+ mutex_unlock(&nlm_host_mutex);
+ return host;
+}
+
+/**
+ * nlmsvc_release_host - release server nlm_host
+ * @host: nlm_host to release
+ *
+ * Host is destroyed later in nlm_gc_hosts().
+ */
+void nlmsvc_release_host(struct nlm_host *host)
+{
+ if (host == NULL)
+ return;
+
+ dprintk("lockd: release server host %s\n", host->h_name);
+
+ BUG_ON(atomic_read(&host->h_count) < 0);
+ BUG_ON(!host->h_server);
+ atomic_dec(&host->h_count);
}
/*
@@ -413,20 +516,28 @@ struct nlm_host * nlm_get_host(struct nlm_host *host)
return host;
}
-/*
- * Release NLM host after use
- */
-void nlm_release_host(struct nlm_host *host)
+static struct nlm_host *next_host_state(struct hlist_head *cache,
+ struct nsm_handle *nsm,
+ const struct nlm_reboot *info)
{
- if (host != NULL) {
- dprintk("lockd: release host %s\n", host->h_name);
- BUG_ON(atomic_read(&host->h_count) < 0);
- if (atomic_dec_and_test(&host->h_count)) {
- BUG_ON(!list_empty(&host->h_lockowners));
- BUG_ON(!list_empty(&host->h_granted));
- BUG_ON(!list_empty(&host->h_reclaim));
+ struct nlm_host *host = NULL;
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+
+ mutex_lock(&nlm_host_mutex);
+ for_each_host(host, pos, chain, cache) {
+ if (host->h_nsmhandle == nsm
+ && host->h_nsmstate != info->state) {
+ host->h_nsmstate = info->state;
+ host->h_state++;
+
+ nlm_get_host(host);
+ goto out;
}
}
+out:
+ mutex_unlock(&nlm_host_mutex);
+ return host;
}
/**
@@ -438,8 +549,6 @@ void nlm_release_host(struct nlm_host *host)
*/
void nlm_host_rebooted(const struct nlm_reboot *info)
{
- struct hlist_head *chain;
- struct hlist_node *pos;
struct nsm_handle *nsm;
struct nlm_host *host;
@@ -452,32 +561,15 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
* lock for this.
* To avoid processing a host several times, we match the nsmstate.
*/
-again: mutex_lock(&nlm_host_mutex);
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- if (host->h_nsmhandle == nsm
- && host->h_nsmstate != info->state) {
- host->h_nsmstate = info->state;
- host->h_state++;
-
- nlm_get_host(host);
- mutex_unlock(&nlm_host_mutex);
-
- if (host->h_server) {
- /* We're server for this guy, just ditch
- * all the locks he held. */
- nlmsvc_free_host_resources(host);
- } else {
- /* He's the server, initiate lock recovery. */
- nlmclnt_recovery(host);
- }
-
- nlm_release_host(host);
- goto again;
- }
- }
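+	/*
+	 * next_host_state() returns at most one matching host per call:
+	 * it bumps h_nsmstate to the new state before returning, so no
+	 * host is handed back twice, and it takes a reference that the
+	 * release call below drops.
+	 */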
+ while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+ nlmsvc_free_host_resources(host);
+ nlmsvc_release_host(host);
}
- mutex_unlock(&nlm_host_mutex);
+ while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+ nlmclnt_recovery(host);
+ nlmclnt_release_host(host);
+ }
+
nsm_release(nsm);
}
@@ -497,13 +589,11 @@ nlm_shutdown_hosts(void)
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- host->h_expires = jiffies - 1;
- if (host->h_rpcclnt) {
- rpc_shutdown_client(host->h_rpcclnt);
- host->h_rpcclnt = NULL;
- }
+ for_each_host(host, pos, chain, nlm_server_hosts) {
+ host->h_expires = jiffies - 1;
+ if (host->h_rpcclnt) {
+ rpc_shutdown_client(host->h_rpcclnt);
+ host->h_rpcclnt = NULL;
}
}
@@ -512,15 +602,13 @@ nlm_shutdown_hosts(void)
mutex_unlock(&nlm_host_mutex);
/* complain if any hosts are left */
- if (nrhosts) {
+ if (nrhosts != 0) {
printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
- dprintk("lockd: %d hosts left:\n", nrhosts);
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- dprintk(" %s (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- }
+ dprintk("lockd: %lu hosts left:\n", nrhosts);
+ for_each_host(host, pos, chain, nlm_server_hosts) {
+ dprintk(" %s (cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
}
}
}
@@ -538,29 +626,22 @@ nlm_gc_hosts(void)
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash)
- host->h_inuse = 0;
- }
+ for_each_host(host, pos, chain, nlm_server_hosts)
+ host->h_inuse = 0;
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
- if (atomic_read(&host->h_count) || host->h_inuse
- || time_before(jiffies, host->h_expires)) {
- dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- continue;
- }
- dprintk("lockd: delete host %s\n", host->h_name);
- hlist_del_init(&host->h_hash);
-
- nlm_destroy_host(host);
- nrhosts--;
+ for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+ if (atomic_read(&host->h_count) || host->h_inuse
+ || time_before(jiffies, host->h_expires)) {
+ dprintk("nlm_gc_hosts skipping %s "
+ "(cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
+ continue;
}
+ nlm_destroy_host_locked(host);
}
next_gc = jiffies + NLM_HOST_COLLECT;
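+	/*
+	 * nlm_gc_hosts() is throttled by next_gc: with NLM_HOST_COLLECT
+	 * defined as (120 * HZ) above, a full collection pass runs at
+	 * most once every two minutes.
+	 */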
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e0c91894964..23d7451b293 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -401,26 +401,22 @@ void nsm_release(struct nsm_handle *nsm)
* Status Monitor wire protocol.
*/
-static int encode_nsm_string(struct xdr_stream *xdr, const char *string)
+static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
{
const u32 len = strlen(string);
__be32 *p;
- if (unlikely(len > SM_MAXSTRLEN))
- return -EIO;
- p = xdr_reserve_space(xdr, sizeof(u32) + len);
- if (unlikely(p == NULL))
- return -EIO;
+ BUG_ON(len > SM_MAXSTRLEN);
+ p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
- return 0;
}
/*
* "mon_name" specifies the host to be monitored.
*/
-static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
{
- return encode_nsm_string(xdr, argp->mon_name);
+ encode_nsm_string(xdr, argp->mon_name);
}
/*
@@ -429,35 +425,25 @@ static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
* (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
* has changed.
*/
-static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
- int status;
__be32 *p;
- status = encode_nsm_string(xdr, utsname()->nodename);
- if (unlikely(status != 0))
- return status;
- p = xdr_reserve_space(xdr, 3 * sizeof(u32));
- if (unlikely(p == NULL))
- return -EIO;
- *p++ = htonl(argp->prog);
- *p++ = htonl(argp->vers);
- *p++ = htonl(argp->proc);
- return 0;
+ encode_nsm_string(xdr, utsname()->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+ *p = cpu_to_be32(argp->proc);
}
/*
* The "mon_id" argument specifies the non-private arguments
* of an NSMPROC_MON or NSMPROC_UNMON call.
*/
-static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
- int status;
-
- status = encode_mon_name(xdr, argp);
- if (unlikely(status != 0))
- return status;
- return encode_my_id(xdr, argp);
+ encode_mon_name(xdr, argp);
+ encode_my_id(xdr, argp);
}
/*
@@ -465,68 +451,56 @@ static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
* by the NSMPROC_MON call. This information will be supplied in the
* NLMPROC_SM_NOTIFY call.
*/
-static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
{
__be32 *p;
p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
- if (unlikely(p == NULL))
- return -EIO;
xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
- return 0;
}
-static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p,
- const struct nsm_args *argp)
+static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nsm_args *argp)
{
- struct xdr_stream xdr;
- int status;
-
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- status = encode_mon_id(&xdr, argp);
- if (unlikely(status))
- return status;
- return encode_priv(&xdr, argp);
+ encode_mon_id(xdr, argp);
+ encode_priv(xdr, argp);
}
-static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p,
- const struct nsm_args *argp)
+static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nsm_args *argp)
{
- struct xdr_stream xdr;
-
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- return encode_mon_id(&xdr, argp);
+ encode_mon_id(xdr, argp);
}
-static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p,
- struct nsm_res *resp)
+static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nsm_res *resp)
{
- struct xdr_stream xdr;
+ __be32 *p;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
+ p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(p == NULL))
return -EIO;
- resp->status = ntohl(*p++);
- resp->state = ntohl(*p);
+ resp->status = be32_to_cpup(p++);
+ resp->state = be32_to_cpup(p);
- dprintk("lockd: xdr_dec_stat_res status %d state %d\n",
- resp->status, resp->state);
+ dprintk("lockd: %s status %d state %d\n",
+ __func__, resp->status, resp->state);
return 0;
}
-static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
- struct nsm_res *resp)
+static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nsm_res *resp)
{
- struct xdr_stream xdr;
+ __be32 *p;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- p = xdr_inline_decode(&xdr, sizeof(u32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- resp->state = ntohl(*p);
+ resp->state = be32_to_cpup(p);
- dprintk("lockd: xdr_dec_stat state %d\n", resp->state);
+ dprintk("lockd: %s state %d\n", __func__, resp->state);
return 0;
}
@@ -542,8 +516,8 @@ static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
static struct rpc_procinfo nsm_procedures[] = {
[NSMPROC_MON] = {
.p_proc = NSMPROC_MON,
- .p_encode = (kxdrproc_t)xdr_enc_mon,
- .p_decode = (kxdrproc_t)xdr_dec_stat_res,
+ .p_encode = (kxdreproc_t)nsm_xdr_enc_mon,
+ .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat_res,
.p_arglen = SM_mon_sz,
.p_replen = SM_monres_sz,
.p_statidx = NSMPROC_MON,
@@ -551,8 +525,8 @@ static struct rpc_procinfo nsm_procedures[] = {
},
[NSMPROC_UNMON] = {
.p_proc = NSMPROC_UNMON,
- .p_encode = (kxdrproc_t)xdr_enc_unmon,
- .p_decode = (kxdrproc_t)xdr_dec_stat,
+ .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon,
+ .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat,
.p_arglen = SM_mon_id_sz,
.p_replen = SM_unmonres_sz,
.p_statidx = NSMPROC_UNMON,
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 38d26119245..9a41fdc1951 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -51,7 +51,7 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
return 0;
no_locks:
- nlm_release_host(host);
+ nlmsvc_release_host(host);
if (error)
return error;
return nlm_lck_denied_nolocks;
@@ -92,7 +92,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
else
dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
}
@@ -134,7 +134,7 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
else
dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
}
@@ -164,7 +164,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -197,7 +197,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = nlmsvc_unlock(file, &argp->lock);
dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -229,7 +229,7 @@ static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
static void nlm4svc_callback_release(void *data)
{
- nlm_release_call(data);
+ nlmsvc_release_call(data);
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
@@ -261,7 +261,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
stat = func(rqstp, argp, &call->a_res);
if (stat != 0) {
- nlm_release_call(call);
+ nlmsvc_release_call(call);
return stat;
}
@@ -334,7 +334,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = nlmsvc_share_file(host, file, argp);
dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -367,7 +367,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = nlmsvc_unshare_file(host, file, argp);
dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -399,7 +399,7 @@ nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
nlmsvc_free_host_resources(host);
- nlm_release_host(host);
+ nlmsvc_release_host(host);
return rpc_success;
}
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ef5659b211e..6e31695d046 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -46,6 +46,7 @@ static void nlmsvc_remove_block(struct nlm_block *block);
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
/*
* The list of blocked locks to retry
@@ -233,7 +234,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
failed_free:
kfree(block);
failed:
- nlm_release_call(call);
+ nlmsvc_release_call(call);
return NULL;
}
@@ -266,7 +267,7 @@ static void nlmsvc_free_block(struct kref *kref)
mutex_unlock(&file->f_mutex);
nlmsvc_freegrantargs(block->b_call);
- nlm_release_call(block->b_call);
+ nlmsvc_release_call(block->b_call);
nlm_release_file(block->b_file);
kfree(block->b_fl);
kfree(block);
@@ -934,3 +935,32 @@ nlmsvc_retry_blocked(void)
return timeout;
}
+
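+/*
+ * nlmdbg_cookie2a() below renders each cookie byte as two lowercase
+ * hex digits; a 4-byte cookie { 0xde, 0xad, 0xbe, 0xef } is returned
+ * as the string "deadbeef".
+ */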
+#ifdef RPC_DEBUG
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+ /*
+ * We can get away with a static buffer because we're only
+ * called with BKL held.
+ */
+ static char buf[2*NLM_MAXCOOKIELEN+1];
+ unsigned int i, len = sizeof(buf);
+ char *p = buf;
+
+ len--; /* allow for trailing \0 */
+ if (len < 3)
+ return "???";
+ for (i = 0 ; i < cookie->len ; i++) {
+ if (len < 2) {
+ strcpy(p-3, "...");
+ break;
+ }
+ sprintf(p, "%02x", cookie->data[i]);
+ p += 2;
+ len -= 2;
+ }
+ *p = '\0';
+
+ return buf;
+}
+#endif
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0caea5310ac..d27aab11f32 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -80,7 +80,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
return 0;
no_locks:
- nlm_release_host(host);
+ nlmsvc_release_host(host);
if (error)
return error;
return nlm_lck_denied_nolocks;
@@ -122,7 +122,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
dprintk("lockd: TEST status %d vers %d\n",
ntohl(resp->status), rqstp->rq_vers);
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
}
@@ -164,7 +164,7 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
else
dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
}
@@ -194,7 +194,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -227,7 +227,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -257,9 +257,17 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
-task->tk_status);
}
+void nlmsvc_release_call(struct nlm_rqst *call)
+{
+ if (!atomic_dec_and_test(&call->a_count))
+ return;
+ nlmsvc_release_host(call->a_host);
+ kfree(call);
+}
+
static void nlmsvc_callback_release(void *data)
{
- nlm_release_call(data);
+ nlmsvc_release_call(data);
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
@@ -291,7 +299,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
stat = func(rqstp, argp, &call->a_res);
if (stat != 0) {
- nlm_release_call(call);
+ nlmsvc_release_call(call);
return stat;
}
@@ -366,7 +374,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = cast_status(nlmsvc_share_file(host, file, argp));
dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -399,7 +407,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
- nlm_release_host(host);
+ nlmsvc_release_host(host);
nlm_release_file(file);
return rpc_success;
}
@@ -431,7 +439,7 @@ nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
return rpc_success;
nlmsvc_free_host_resources(host);
- nlm_release_host(host);
+ nlmsvc_release_host(host);
return rpc_success;
}
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b583ab0a4cb..964666c68a8 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -149,37 +149,6 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
}
/*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
- struct file_lock *fl = &lock->fl;
- __s32 start, len;
-
- if (!(p = xdr_encode_string(p, lock->caller))
- || !(p = nlm_encode_fh(p, &lock->fh))
- || !(p = nlm_encode_oh(p, &lock->oh)))
- return NULL;
-
- if (fl->fl_start > NLM_OFFSET_MAX
- || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
- return NULL;
-
- start = loff_t_to_s32(fl->fl_start);
- if (fl->fl_end == OFFSET_MAX)
- len = 0;
- else
- len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
-
- *p++ = htonl(lock->svid);
- *p++ = htonl(start);
- *p++ = htonl(len);
-
- return p;
-}
-
-/*
* Encode result of a TEST/TEST_MSG call
*/
static __be32 *
@@ -372,259 +341,3 @@ nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
-{
- return 0;
-}
-#endif
-
-static int
-nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm_decode_cookie(p, &resp->cookie)))
- return -EIO;
- resp->status = *p++;
- if (resp->status == nlm_lck_denied) {
- struct file_lock *fl = &resp->lock.fl;
- u32 excl;
- s32 start, len, end;
-
- memset(&resp->lock, 0, sizeof(resp->lock));
- locks_init_lock(fl);
- excl = ntohl(*p++);
- resp->lock.svid = ntohl(*p++);
- fl->fl_pid = (pid_t)resp->lock.svid;
- if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
- return -EIO;
-
- fl->fl_flags = FL_POSIX;
- fl->fl_type = excl? F_WRLCK : F_RDLCK;
- start = ntohl(*p++);
- len = ntohl(*p++);
- end = start + len - 1;
-
- fl->fl_start = s32_to_loff_t(start);
- if (len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = s32_to_loff_t(end);
- }
- return 0;
-}
-
-
-static int
-nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = argp->block? xdr_one : xdr_zero;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm_encode_lock(p, lock)))
- return -EIO;
- *p++ = argp->reclaim? xdr_one : xdr_zero;
- *p++ = htonl(argp->state);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = argp->block? xdr_one : xdr_zero;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm_encode_cookie(p, &argp->cookie)))
- return -EIO;
- if (!(p = nlm_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm_encode_cookie(p, &resp->cookie)))
- return -EIO;
- *p++ = resp->status;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm_encode_testres(p, resp)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm_decode_cookie(p, &resp->cookie)))
- return -EIO;
- resp->status = *p++;
- return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM_void_sz 0
-#define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE)
-#define NLM_lock_sz 3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz
-#define NLM_holder_sz 4+NLM_owner_sz
-
-#define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz
-#define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz
-#define NLM_cancargs_sz NLM_cookie_sz+2+NLM_lock_sz
-#define NLM_unlockargs_sz NLM_cookie_sz+NLM_lock_sz
-
-#define NLM_testres_sz NLM_cookie_sz+1+NLM_holder_sz
-#define NLM_res_sz NLM_cookie_sz+1
-#define NLM_norep_sz 0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlmclt_decode_norep NULL
-
-#define PROC(proc, argtype, restype) \
-[NLMPROC_##proc] = { \
- .p_proc = NLMPROC_##proc, \
- .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \
- .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \
- .p_arglen = NLM_##argtype##_sz, \
- .p_replen = NLM_##restype##_sz, \
- .p_statidx = NLMPROC_##proc, \
- .p_name = #proc, \
- }
-
-static struct rpc_procinfo nlm_procedures[] = {
- PROC(TEST, testargs, testres),
- PROC(LOCK, lockargs, res),
- PROC(CANCEL, cancargs, res),
- PROC(UNLOCK, unlockargs, res),
- PROC(GRANTED, testargs, res),
- PROC(TEST_MSG, testargs, norep),
- PROC(LOCK_MSG, lockargs, norep),
- PROC(CANCEL_MSG, cancargs, norep),
- PROC(UNLOCK_MSG, unlockargs, norep),
- PROC(GRANTED_MSG, testargs, norep),
- PROC(TEST_RES, testres, norep),
- PROC(LOCK_RES, res, norep),
- PROC(CANCEL_RES, res, norep),
- PROC(UNLOCK_RES, res, norep),
- PROC(GRANTED_RES, res, norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
- PROC(SHARE, shareargs, shareres),
- PROC(UNSHARE, shareargs, shareres),
- PROC(NM_LOCK, lockargs, res),
- PROC(FREE_ALL, notify, void),
-#endif
-};
-
-static struct rpc_version nlm_version1 = {
- .number = 1,
- .nrprocs = 16,
- .procs = nlm_procedures,
-};
-
-static struct rpc_version nlm_version3 = {
- .number = 3,
- .nrprocs = 24,
- .procs = nlm_procedures,
-};
-
-static struct rpc_version * nlm_versions[] = {
- [1] = &nlm_version1,
- [3] = &nlm_version3,
-#ifdef CONFIG_LOCKD_V4
- [4] = &nlm_version4,
-#endif
-};
-
-static struct rpc_stat nlm_stats;
-
-struct rpc_program nlm_program = {
- .name = "lockd",
- .number = NLM_PROGRAM,
- .nrvers = ARRAY_SIZE(nlm_versions),
- .version = nlm_versions,
- .stats = &nlm_stats,
-};
-
-#ifdef RPC_DEBUG
-const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
-{
- /*
- * We can get away with a static buffer because we're only
- * called with BKL held.
- */
- static char buf[2*NLM_MAXCOOKIELEN+1];
- unsigned int i, len = sizeof(buf);
- char *p = buf;
-
- len--; /* allow for trailing \0 */
- if (len < 3)
- return "???";
- for (i = 0 ; i < cookie->len ; i++) {
- if (len < 2) {
- strcpy(p-3, "...");
- break;
- }
- sprintf(p, "%02x", cookie->data[i]);
- p += 2;
- len -= 2;
- }
- *p = '\0';
-
- return buf;
-}
-#endif
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ad9dbbc9145..dfa4789cd46 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -93,15 +93,6 @@ nlm4_decode_fh(__be32 *p, struct nfs_fh *f)
return p + XDR_QUADLEN(f->size);
}
-static __be32 *
-nlm4_encode_fh(__be32 *p, struct nfs_fh *f)
-{
- *p++ = htonl(f->size);
- if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
- memcpy(p, f->data, f->size);
- return p + XDR_QUADLEN(f->size);
-}
-
/*
* Encode and decode owner handle
*/
@@ -112,12 +103,6 @@ nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh)
}
static __be32 *
-nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh)
-{
- return xdr_encode_netobj(p, oh);
-}
-
-static __be32 *
nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
@@ -150,38 +135,6 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
}
/*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm4_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
- struct file_lock *fl = &lock->fl;
- __s64 start, len;
-
- if (!(p = xdr_encode_string(p, lock->caller))
- || !(p = nlm4_encode_fh(p, &lock->fh))
- || !(p = nlm4_encode_oh(p, &lock->oh)))
- return NULL;
-
- if (fl->fl_start > NLM4_OFFSET_MAX
- || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
- return NULL;
-
- *p++ = htonl(lock->svid);
-
- start = loff_t_to_s64(fl->fl_start);
- if (fl->fl_end == OFFSET_MAX)
- len = 0;
- else
- len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
-
- p = xdr_encode_hyper(p, start);
- p = xdr_encode_hyper(p, len);
-
- return p;
-}
-
-/*
* Encode result of a TEST/TEST_MSG call
*/
static __be32 *
@@ -379,211 +332,3 @@ nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr)
-{
- return 0;
-}
-#endif
-
-static int
-nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm4_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
- return -EIO;
- resp->status = *p++;
- if (resp->status == nlm_lck_denied) {
- struct file_lock *fl = &resp->lock.fl;
- u32 excl;
- __u64 start, len;
- __s64 end;
-
- memset(&resp->lock, 0, sizeof(resp->lock));
- locks_init_lock(fl);
- excl = ntohl(*p++);
- resp->lock.svid = ntohl(*p++);
- fl->fl_pid = (pid_t)resp->lock.svid;
- if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
- return -EIO;
-
- fl->fl_flags = FL_POSIX;
- fl->fl_type = excl? F_WRLCK : F_RDLCK;
- p = xdr_decode_hyper(p, &start);
- p = xdr_decode_hyper(p, &len);
- end = start + len - 1;
-
- fl->fl_start = s64_to_loff_t(start);
- if (len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = s64_to_loff_t(end);
- }
- return 0;
-}
-
-
-static int
-nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = argp->block? xdr_one : xdr_zero;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm4_encode_lock(p, lock)))
- return -EIO;
- *p++ = argp->reclaim? xdr_one : xdr_zero;
- *p++ = htonl(argp->state);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
- return -EIO;
- *p++ = argp->block? xdr_one : xdr_zero;
- *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
- if (!(p = nlm4_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
- struct nlm_lock *lock = &argp->lock;
-
- if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
- return -EIO;
- if (!(p = nlm4_encode_lock(p, lock)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
- return -EIO;
- *p++ = resp->status;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm4_encode_testres(p, resp)))
- return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
-}
-
-static int
-nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
- if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
- return -EIO;
- resp->status = *p++;
- return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
-# error "NLM host name cannot be larger than NLM's maximum string length!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM4_void_sz 0
-#define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM4_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE)
-#define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz
-#define NLM4_holder_sz 6+NLM4_owner_sz
-
-#define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz
-#define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz
-#define NLM4_cancargs_sz NLM4_cookie_sz+2+NLM4_lock_sz
-#define NLM4_unlockargs_sz NLM4_cookie_sz+NLM4_lock_sz
-
-#define NLM4_testres_sz NLM4_cookie_sz+1+NLM4_holder_sz
-#define NLM4_res_sz NLM4_cookie_sz+1
-#define NLM4_norep_sz 0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlm4clt_decode_norep NULL
-
-#define PROC(proc, argtype, restype) \
-[NLMPROC_##proc] = { \
- .p_proc = NLMPROC_##proc, \
- .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \
- .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \
- .p_arglen = NLM4_##argtype##_sz, \
- .p_replen = NLM4_##restype##_sz, \
- .p_statidx = NLMPROC_##proc, \
- .p_name = #proc, \
- }
-
-static struct rpc_procinfo nlm4_procedures[] = {
- PROC(TEST, testargs, testres),
- PROC(LOCK, lockargs, res),
- PROC(CANCEL, cancargs, res),
- PROC(UNLOCK, unlockargs, res),
- PROC(GRANTED, testargs, res),
- PROC(TEST_MSG, testargs, norep),
- PROC(LOCK_MSG, lockargs, norep),
- PROC(CANCEL_MSG, cancargs, norep),
- PROC(UNLOCK_MSG, unlockargs, norep),
- PROC(GRANTED_MSG, testargs, norep),
- PROC(TEST_RES, testres, norep),
- PROC(LOCK_RES, res, norep),
- PROC(CANCEL_RES, res, norep),
- PROC(UNLOCK_RES, res, norep),
- PROC(GRANTED_RES, res, norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
- PROC(SHARE, shareargs, shareres),
- PROC(UNSHARE, shareargs, shareres),
- PROC(NM_LOCK, lockargs, res),
- PROC(FREE_ALL, notify, void),
-#endif
-};
-
-struct rpc_version nlm_version4 = {
- .number = 4,
- .nrprocs = 24,
- .procs = nlm4_procedures,
-};
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 93444747237..a25444ab2ba 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -76,18 +76,6 @@ EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
-struct mb_cache {
- struct list_head c_cache_list;
- const char *c_name;
- atomic_t c_entry_count;
- int c_max_entries;
- int c_bucket_bits;
- struct kmem_cache *c_entry_cache;
- struct list_head *c_block_hash;
- struct list_head *c_index_hash;
-};
-
-
/*
* Global data: list of all mbcache's, lru list, and a spinlock for
* accessing cache data structures on SMP machines. The lru list is
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 93a8b3bd69e..199016528fc 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -16,9 +16,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
-#if defined(CONFIG_NFS_V4_1)
#include <linux/sunrpc/bc_xprt.h>
-#endif
#include <net/inet_sock.h>
@@ -137,6 +135,33 @@ out_err:
#if defined(CONFIG_NFS_V4_1)
/*
+ * CB_SEQUENCE operations will fail until the callback sessionid is set.
+ */
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+ struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
+ struct nfs4_sessionid *bc_sid;
+
+ if (!serv->sv_bc_xprt)
+ return -EINVAL;
+
+ /* on success, freed in xprt_free() */
+ bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
+ if (!bc_sid)
+ return -ENOMEM;
+ memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN);
+ spin_lock_bh(&serv->sv_cb_lock);
+ serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
+ spin_unlock_bh(&serv->sv_cb_lock);
+ dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
+ ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
+ ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
+ serv->sv_bc_xprt);
+ return 0;
+}
+
+/*
* The callback service for NFSv4.1 callbacks
*/
static int
@@ -177,30 +202,38 @@ nfs41_callback_svc(void *vrqstp)
struct svc_rqst *
nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
{
- struct svc_xprt *bc_xprt;
- struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+ struct svc_rqst *rqstp;
+ int ret;
- dprintk("--> %s\n", __func__);
- /* Create a svc_sock for the service */
- bc_xprt = svc_sock_create(serv, xprt->prot);
- if (!bc_xprt)
+ /*
+ * Create an svc_sock for the back channel service that shares the
+ * fore channel connection.
+ * Returns the input port (0) and sets the svc_serv bc_xprt on success
+ */
+ ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
+ SVC_SOCK_ANONYMOUS);
+ if (ret < 0) {
+ rqstp = ERR_PTR(ret);
goto out;
+ }
/*
* Save the svc_serv in the transport so that it can
* be referenced when the session backchannel is initialized
*/
- serv->bc_xprt = bc_xprt;
xprt->bc_serv = serv;
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
init_waitqueue_head(&serv->sv_cb_waitq);
rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
- if (IS_ERR(rqstp))
- svc_sock_destroy(bc_xprt);
+ if (IS_ERR(rqstp)) {
+ svc_xprt_put(serv->sv_bc_xprt);
+ serv->sv_bc_xprt = NULL;
+ }
out:
- dprintk("--> %s return %p\n", __func__, rqstp);
+ dprintk("--> %s return %ld\n", __func__,
+ IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
return rqstp;
}
@@ -233,6 +266,10 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct nfs_callback_data *cb_info)
{
}
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -328,6 +365,9 @@ static int check_gss_callback_principal(struct nfs_client *clp,
struct rpc_clnt *r = clp->cl_rpcclient;
char *p = svc_gss_principal(rqstp);
+ /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
+ if (clp->cl_minorversion != 0)
+ return SVC_DROP;
/*
* It might just be a normal user principal, in which case
* userspace won't bother to tell us the name at all.
@@ -345,6 +385,23 @@ static int check_gss_callback_principal(struct nfs_client *clp,
return SVC_OK;
}
+/* pg_authenticate method helper */
+static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
+{
+ struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
+ int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
+
+ dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
+ if (svc_is_backchannel(rqstp))
+ /* Sessionid (usually) set after CB_NULL ping */
+ return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
+ is_cb_compound);
+ else
+ /* No callback identifier in pg_authenticate */
+ return nfs4_find_client_no_ident(svc_addr(rqstp));
+}
+
+/* pg_authenticate method for nfsv4 callback threads. */
static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
struct nfs_client *clp;
@@ -352,7 +409,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
int ret = SVC_OK;
/* Don't talk to strangers */
- clp = nfs_find_client(svc_addr(rqstp), 4);
+ clp = nfs_cb_find_client(rqstp);
if (clp == NULL)
return SVC_DROP;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 85a7cfd1b8d..d3b44f9bd74 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -34,10 +34,17 @@ enum nfs4_callback_opnum {
OP_CB_ILLEGAL = 10044,
};
+struct cb_process_state {
+ __be32 drc_status;
+ struct nfs_client *clp;
+ struct nfs4_sessionid *svc_sid; /* v4.1 callback service sessionid */
+};
+
struct cb_compound_hdr_arg {
unsigned int taglen;
const char *tag;
unsigned int minorversion;
+ unsigned int cb_ident; /* v4.0 callback identifier */
unsigned nops;
};
@@ -103,14 +110,23 @@ struct cb_sequenceres {
uint32_t csr_target_highestslotid;
};
-extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
- struct cb_sequenceres *res);
+extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res,
+ struct cb_process_state *cps);
extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
const nfs4_stateid *stateid);
#define RCA4_TYPE_MASK_RDATA_DLG 0
#define RCA4_TYPE_MASK_WDATA_DLG 1
+#define RCA4_TYPE_MASK_DIR_DLG 2
+#define RCA4_TYPE_MASK_FILE_LAYOUT 3
+#define RCA4_TYPE_MASK_BLK_LAYOUT 4
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
+#define RCA4_TYPE_MASK_ALL 0xf31f
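+/* 0xf31f == bits 0-4, 8-9 and 12-15 of the mask values defined above */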
struct cb_recallanyargs {
struct sockaddr *craa_addr;
@@ -118,25 +134,52 @@ struct cb_recallanyargs {
uint32_t craa_type_mask;
};
-extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
+ void *dummy,
+ struct cb_process_state *cps);
struct cb_recallslotargs {
struct sockaddr *crsa_addr;
uint32_t crsa_target_max_slots;
};
-extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
- void *dummy);
+extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
+ void *dummy,
+ struct cb_process_state *cps);
+
+struct cb_layoutrecallargs {
+ struct sockaddr *cbl_addr;
+ uint32_t cbl_recall_type;
+ uint32_t cbl_layout_type;
+ uint32_t cbl_layoutchanged;
+ union {
+ struct {
+ struct nfs_fh cbl_fh;
+ struct pnfs_layout_range cbl_range;
+ nfs4_stateid cbl_stateid;
+ };
+ struct nfs_fsid cbl_fsid;
+ };
+};
-#endif /* CONFIG_NFS_V4_1 */
+extern unsigned nfs4_callback_layoutrecall(
+ struct cb_layoutrecallargs *args,
+ void *dummy, struct cb_process_state *cps);
-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
+extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
+extern void nfs4_cb_take_slot(struct nfs_client *clp);
+#endif /* CONFIG_NFS_V4_1 */
+extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+ struct cb_getattrres *res,
+ struct cb_process_state *cps);
+extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+ struct cb_process_state *cps);
#ifdef CONFIG_NFS_V4
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
extern void nfs_callback_down(int minorversion);
extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
const nfs4_stateid *stateid);
+extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
#endif /* CONFIG_NFS_V4 */
/*
* nfs41: Callbacks are expected to not cause substantial latency,
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2950fca0c61..4bb91cb2620 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -12,30 +12,33 @@
#include "callback.h"
#include "delegation.h"
#include "internal.h"
+#include "pnfs.h"
#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif
-
-__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
+
+__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+ struct cb_getattrres *res,
+ struct cb_process_state *cps)
{
- struct nfs_client *clp;
struct nfs_delegation *delegation;
struct nfs_inode *nfsi;
struct inode *inode;
+ res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
+ goto out;
+
res->bitmap[0] = res->bitmap[1] = 0;
res->status = htonl(NFS4ERR_BADHANDLE);
- clp = nfs_find_client(args->addr, 4);
- if (clp == NULL)
- goto out;
dprintk("NFS: GETATTR callback request from %s\n",
- rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
- inode = nfs_delegation_find_inode(clp, &args->fh);
+ inode = nfs_delegation_find_inode(cps->clp, &args->fh);
if (inode == NULL)
- goto out_putclient;
+ goto out;
nfsi = NFS_I(inode);
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
@@ -55,49 +58,41 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
out_iput:
rcu_read_unlock();
iput(inode);
-out_putclient:
- nfs_put_client(clp);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
return res->status;
}
-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
+__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+ struct cb_process_state *cps)
{
- struct nfs_client *clp;
struct inode *inode;
__be32 res;
- res = htonl(NFS4ERR_BADHANDLE);
- clp = nfs_find_client(args->addr, 4);
- if (clp == NULL)
+ res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
goto out;
dprintk("NFS: RECALL callback request from %s\n",
- rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
-
- do {
- struct nfs_client *prev = clp;
-
- inode = nfs_delegation_find_inode(clp, &args->fh);
- if (inode != NULL) {
- /* Set up a helper thread to actually return the delegation */
- switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
- case 0:
- res = 0;
- break;
- case -ENOENT:
- if (res != 0)
- res = htonl(NFS4ERR_BAD_STATEID);
- break;
- default:
- res = htonl(NFS4ERR_RESOURCE);
- }
- iput(inode);
- }
- clp = nfs_find_client_next(prev);
- nfs_put_client(prev);
- } while (clp != NULL);
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ res = htonl(NFS4ERR_BADHANDLE);
+ inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+ if (inode == NULL)
+ goto out;
+ /* Set up a helper thread to actually return the delegation */
+ switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
+ case 0:
+ res = 0;
+ break;
+ case -ENOENT:
+ if (res != 0)
+ res = htonl(NFS4ERR_BAD_STATEID);
+ break;
+ default:
+ res = htonl(NFS4ERR_RESOURCE);
+ }
+ iput(inode);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
@@ -113,6 +108,139 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf
#if defined(CONFIG_NFS_V4_1)
+static u32 initiate_file_draining(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ struct pnfs_layout_hdr *lo;
+ struct inode *ino;
+ bool found = false;
+ u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+ LIST_HEAD(free_me_list);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+ if (nfs_compare_fh(&args->cbl_fh,
+ &NFS_I(lo->plh_inode)->fh))
+ continue;
+ ino = igrab(lo->plh_inode);
+ if (!ino)
+ continue;
+ found = true;
+ /* Without this, the layout can be freed as soon
+ * as we release cl_lock.
+ */
+ get_layout_hdr(lo);
+ break;
+ }
+ spin_unlock(&clp->cl_lock);
+ if (!found)
+ return NFS4ERR_NOMATCHING_LAYOUT;
+
+ spin_lock(&ino->i_lock);
+ if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+ mark_matching_lsegs_invalid(lo, &free_me_list,
+ args->cbl_range.iomode))
+ rv = NFS4ERR_DELAY;
+ else
+ rv = NFS4ERR_NOMATCHING_LAYOUT;
+ pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&free_me_list);
+ put_layout_hdr(lo);
+ iput(ino);
+ return rv;
+}
+
+static u32 initiate_bulk_draining(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ struct pnfs_layout_hdr *lo;
+ struct inode *ino;
+ u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+ struct pnfs_layout_hdr *tmp;
+ LIST_HEAD(recall_list);
+ LIST_HEAD(free_me_list);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+ if ((args->cbl_recall_type == RETURN_FSID) &&
+ memcmp(&NFS_SERVER(lo->plh_inode)->fsid,
+ &args->cbl_fsid, sizeof(struct nfs_fsid)))
+ continue;
+ if (!igrab(lo->plh_inode))
+ continue;
+ get_layout_hdr(lo);
+ BUG_ON(!list_empty(&lo->plh_bulk_recall));
+ list_add(&lo->plh_bulk_recall, &recall_list);
+ }
+ spin_unlock(&clp->cl_lock);
+ list_for_each_entry_safe(lo, tmp,
+ &recall_list, plh_bulk_recall) {
+ ino = lo->plh_inode;
+ spin_lock(&ino->i_lock);
+ set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+ if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
+ rv = NFS4ERR_DELAY;
+ list_del_init(&lo->plh_bulk_recall);
+ spin_unlock(&ino->i_lock);
+ put_layout_hdr(lo);
+ iput(ino);
+ }
+ pnfs_free_lseg_list(&free_me_list);
+ return rv;
+}
+
+static u32 do_callback_layoutrecall(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ u32 res = NFS4ERR_DELAY;
+
+ dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
+ if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
+ goto out;
+ if (args->cbl_recall_type == RETURN_FILE)
+ res = initiate_file_draining(clp, args);
+ else
+ res = initiate_bulk_draining(clp, args);
+ clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
+out:
+ dprintk("%s returning %i\n", __func__, res);
+ return res;
+
+}
+
+__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
+ void *dummy, struct cb_process_state *cps)
+{
+ u32 res;
+
+ dprintk("%s: -->\n", __func__);
+
+ if (cps->clp)
+ res = do_callback_layoutrecall(cps->clp, args);
+ else
+ res = NFS4ERR_OP_NOT_IN_SESSION;
+
+ dprintk("%s: exit with status = %d\n", __func__, res);
+ return cpu_to_be32(res);
+}
+
+static void pnfs_recall_all_layouts(struct nfs_client *clp)
+{
+ struct cb_layoutrecallargs args;
+
+ /* Pretend we got a CB_LAYOUTRECALL(ALL) */
+ memset(&args, 0, sizeof(args));
+ args.cbl_recall_type = RETURN_ALL;
+ /* FIXME we ignore errors, what should we do? */
+ do_callback_layoutrecall(clp, &args);
+}
+
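The guard in do_callback_layoutrecall() above is worth calling out: test_and_set_bit(NFS4CLNT_LAYOUTRECALL, ...) serializes recalls, so a CB_LAYOUTRECALL that races with one already draining gets NFS4ERR_DELAY rather than running concurrently. A minimal stand-alone model of that guard in C11, with an atomic flag standing in for the cl_state bit (the helper names are invented; the error value is the RFC 5661 constant):

#include <stdatomic.h>
#include <stdio.h>

#define NFS4ERR_DELAY 10008	/* RFC 5661 value, used here for illustration */

static atomic_flag layoutrecall_busy = ATOMIC_FLAG_INIT;

/* Returns 0 if this caller won the right to drain, NFS4ERR_DELAY otherwise. */
static unsigned do_layoutrecall(void)
{
	if (atomic_flag_test_and_set(&layoutrecall_busy))
		return NFS4ERR_DELAY;	/* another recall is already draining */
	/* ... drain matching layouts here ... */
	atomic_flag_clear(&layoutrecall_busy);
	return 0;
}

int main(void)
{
	printf("first caller: %u\n", do_layoutrecall());
	return 0;
}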
int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
if (delegation == NULL)
@@ -185,42 +313,6 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
}
/*
- * Returns a pointer to a held 'struct nfs_client' that matches the server's
- * address, major version number, and session ID. It is the caller's
- * responsibility to release the returned reference.
- *
- * Returns NULL if there are no connections with sessions, or if no session
- * matches the one of interest.
- */
- static struct nfs_client *find_client_with_session(
- const struct sockaddr *addr, u32 nfsversion,
- struct nfs4_sessionid *sessionid)
-{
- struct nfs_client *clp;
-
- clp = nfs_find_client(addr, 4);
- if (clp == NULL)
- return NULL;
-
- do {
- struct nfs_client *prev = clp;
-
- if (clp->cl_session != NULL) {
- if (memcmp(clp->cl_session->sess_id.data,
- sessionid->data,
- NFS4_MAX_SESSIONID_LEN) == 0) {
- /* Returns a held reference to clp */
- return clp;
- }
- }
- clp = nfs_find_client_next(prev);
- nfs_put_client(prev);
- } while (clp != NULL);
-
- return NULL;
-}
-
-/*
* For each referring call triple, check the session's slot table for
* a match. If the slot is in use and the sequence numbers match, the
* client is still waiting for a response to the original request.
@@ -276,20 +368,34 @@ out:
}
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
- struct cb_sequenceres *res)
+ struct cb_sequenceres *res,
+ struct cb_process_state *cps)
{
struct nfs_client *clp;
int i;
__be32 status;
+ cps->clp = NULL;
+
status = htonl(NFS4ERR_BADSESSION);
- clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+ /* Incoming session must match the callback session */
+ if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
+ goto out;
+
+ clp = nfs4_find_client_sessionid(args->csa_addr,
+ &args->csa_sessionid, 1);
if (clp == NULL)
goto out;
+ /* state manager is resetting the session */
+ if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+ status = NFS4ERR_DELAY;
+ goto out;
+ }
+
status = validate_seqid(&clp->cl_session->bc_slot_table, args);
if (status)
- goto out_putclient;
+ goto out;
/*
* Check for pending referring calls. If a match is found, a
@@ -298,7 +404,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
*/
if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
status = htonl(NFS4ERR_DELAY);
- goto out_putclient;
+ goto out;
}
memcpy(&res->csr_sessionid, &args->csa_sessionid,
@@ -307,83 +413,93 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
res->csr_slotid = args->csa_slotid;
res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+ nfs4_cb_take_slot(clp);
+ cps->clp = clp; /* put in nfs4_callback_compound */
-out_putclient:
- nfs_put_client(clp);
out:
for (i = 0; i < args->csa_nrclists; i++)
kfree(args->csa_rclists[i].rcl_refcalls);
kfree(args->csa_rclists);
- if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
- res->csr_status = 0;
- else
+ if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+ cps->drc_status = status;
+ status = 0;
+ } else
res->csr_status = status;
+
dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
ntohl(status), ntohl(res->csr_status));
return status;
}
-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+static bool
+validate_bitmap_values(unsigned long mask)
+{
+ return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+}
+
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+ struct cb_process_state *cps)
{
- struct nfs_client *clp;
__be32 status;
fmode_t flags = 0;
- status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
- clp = nfs_find_client(args->craa_addr, 4);
- if (clp == NULL)
+ status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* set in cb_sequence */
goto out;
dprintk("NFS: RECALL_ANY callback request from %s\n",
- rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ status = cpu_to_be32(NFS4ERR_INVAL);
+ if (!validate_bitmap_values(args->craa_type_mask))
+ goto out;
+ status = cpu_to_be32(NFS4_OK);
if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
&args->craa_type_mask))
flags = FMODE_READ;
if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
&args->craa_type_mask))
flags |= FMODE_WRITE;
-
+ if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
+ &args->craa_type_mask))
+ pnfs_recall_all_layouts(cps->clp);
if (flags)
- nfs_expire_all_delegation_types(clp, flags);
- status = htonl(NFS4_OK);
+ nfs_expire_all_delegation_types(cps->clp, flags);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
/* Reduce the fore channel's max_slots to the target value */
-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+ struct cb_process_state *cps)
{
- struct nfs_client *clp;
struct nfs4_slot_table *fc_tbl;
__be32 status;
status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
- clp = nfs_find_client(args->crsa_addr, 4);
- if (clp == NULL)
+ if (!cps->clp) /* set in cb_sequence */
goto out;
dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
- rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
args->crsa_target_max_slots);
- fc_tbl = &clp->cl_session->fc_slot_table;
+ fc_tbl = &cps->clp->cl_session->fc_slot_table;
status = htonl(NFS4ERR_BAD_HIGH_SLOT);
if (args->crsa_target_max_slots > fc_tbl->max_slots ||
args->crsa_target_max_slots < 1)
- goto out_putclient;
+ goto out;
status = htonl(NFS4_OK);
if (args->crsa_target_max_slots == fc_tbl->max_slots)
- goto out_putclient;
+ goto out;
fc_tbl->target_max_slots = args->crsa_target_max_slots;
- nfs41_handle_recall_slot(clp);
-out_putclient:
- nfs_put_client(clp); /* balance nfs_find_client */
+ nfs41_handle_recall_slot(cps->clp);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
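Taken together, these callback_proc.c hunks drop the per-operation nfs_find_client() scans: the compound dispatcher now resolves the nfs_client once (by cb_ident for v4.0, in CB_SEQUENCE for v4.1) and threads it through each handler in a cb_process_state. A small userspace sketch of the pattern, with invented structures and an illustrative handler; only the shape, not the kernel API, is shown:

#include <stdio.h>
#include <stdint.h>

struct client { int id; };

struct cb_process_state {
	struct client *clp;	/* set once by the ident/sequence step */
	uint32_t drc_status;	/* sticky status for replayed compounds */
};

/* Per-op handlers only check cps->clp instead of re-finding the client. */
static uint32_t op_recall(struct cb_process_state *cps)
{
	if (cps->clp == NULL)
		return 10071;	/* NFS4ERR_OP_NOT_IN_SESSION, per RFC 5661 */
	printf("recall for client %d\n", cps->clp->id);
	return 0;
}

int main(void)
{
	struct client c = { .id = 42 };
	struct cb_process_state cps = { .clp = &c, .drc_status = 0 };

	return (int)op_recall(&cps);
}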
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 05af212f0ed..23112c263f8 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -10,8 +10,10 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "nfs4_fs.h"
#include "callback.h"
+#include "internal.h"
#define CB_OP_TAGLEN_MAXSZ (512)
#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ)
@@ -22,6 +24,7 @@
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#if defined(CONFIG_NFS_V4_1)
+#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
4 + 1 + 3)
#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
@@ -33,7 +36,8 @@
/* Internal error code */
#define NFS4ERR_RESOURCE_HDR 11050
-typedef __be32 (*callback_process_op_t)(void *, void *);
+typedef __be32 (*callback_process_op_t)(void *, void *,
+ struct cb_process_state *);
typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
@@ -160,7 +164,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
hdr->minorversion = ntohl(*p++);
/* Check minor version is zero or one. */
if (hdr->minorversion <= 1) {
- p++; /* skip callback_ident */
+ hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
} else {
printk(KERN_WARNING "%s: NFSv4 server callback with "
"illegal minor version %u!\n",
@@ -220,6 +224,66 @@ out:
#if defined(CONFIG_NFS_V4_1)
+static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_layoutrecallargs *args)
+{
+ __be32 *p;
+ __be32 status = 0;
+ uint32_t iomode;
+
+ args->cbl_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+
+ args->cbl_layout_type = ntohl(*p++);
+ /* Despite the spec's XDR, iomode really belongs in the FILE switch,
+ * as it is unusable and ignored with the other types.
+ */
+ iomode = ntohl(*p++);
+ args->cbl_layoutchanged = ntohl(*p++);
+ args->cbl_recall_type = ntohl(*p++);
+
+ if (args->cbl_recall_type == RETURN_FILE) {
+ args->cbl_range.iomode = iomode;
+ status = decode_fh(xdr, &args->cbl_fh);
+ if (unlikely(status != 0))
+ goto out;
+
+ p = read_buf(xdr, 2 * sizeof(uint64_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ p = xdr_decode_hyper(p, &args->cbl_range.offset);
+ p = xdr_decode_hyper(p, &args->cbl_range.length);
+ status = decode_stateid(xdr, &args->cbl_stateid);
+ if (unlikely(status != 0))
+ goto out;
+ } else if (args->cbl_recall_type == RETURN_FSID) {
+ p = read_buf(xdr, 2 * sizeof(uint64_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ p = xdr_decode_hyper(p, &args->cbl_fsid.major);
+ p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
+ } else if (args->cbl_recall_type != RETURN_ALL) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
+ __func__,
+ args->cbl_layout_type, iomode,
+ args->cbl_layoutchanged, args->cbl_recall_type);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
static __be32 decode_sessionid(struct xdr_stream *xdr,
struct nfs4_sessionid *sid)
{
@@ -574,10 +638,10 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_SEQUENCE:
case OP_CB_RECALL_ANY:
case OP_CB_RECALL_SLOT:
+ case OP_CB_LAYOUTRECALL:
*op = &callback_ops[op_nr];
break;
- case OP_CB_LAYOUTRECALL:
case OP_CB_NOTIFY_DEVICEID:
case OP_CB_NOTIFY:
case OP_CB_PUSH_DELEG:
@@ -593,6 +657,37 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
return htonl(NFS_OK);
}
+static void nfs4_callback_free_slot(struct nfs4_session *session)
+{
+ struct nfs4_slot_table *tbl = &session->bc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /*
+ * Let the state manager know that callback processing is done.
+ * A single slot, so the highest used slotid is either 0 or -1.
+ */
+ tbl->highest_used_slotid--;
+ nfs4_check_drain_bc_complete(session);
+ spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+ if (clp && clp->cl_session)
+ nfs4_callback_free_slot(clp->cl_session);
+}
+
+/* A single slot, so the highest used slotid is either 0 or -1 */
+void nfs4_cb_take_slot(struct nfs_client *clp)
+{
+ struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ tbl->highest_used_slotid++;
+ BUG_ON(tbl->highest_used_slotid != 0);
+ spin_unlock(&tbl->slot_tbl_lock);
+}
+
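Because the back channel has exactly one slot, these helpers simply toggle highest_used_slotid between -1 (free) and 0 (held) under the slot table lock. A stand-alone pthread model of that bookkeeping, with assert() standing in for BUG_ON(); the names are illustrative, not the kernel's:

#include <assert.h>
#include <pthread.h>

struct slot_table {
	pthread_mutex_t lock;
	int highest_used_slotid;	/* -1 when free, 0 when held */
};

static void cb_take_slot(struct slot_table *tbl)
{
	pthread_mutex_lock(&tbl->lock);
	tbl->highest_used_slotid++;
	assert(tbl->highest_used_slotid == 0);	/* only one slot exists */
	pthread_mutex_unlock(&tbl->lock);
}

static void cb_free_slot(struct slot_table *tbl)
{
	pthread_mutex_lock(&tbl->lock);
	tbl->highest_used_slotid--;	/* back to -1: channel drained */
	pthread_mutex_unlock(&tbl->lock);
}

int main(void)
{
	struct slot_table tbl = { PTHREAD_MUTEX_INITIALIZER, -1 };

	cb_take_slot(&tbl);
	cb_free_slot(&tbl);
	return 0;
}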
#else /* CONFIG_NFS_V4_1 */
static __be32
@@ -601,6 +696,9 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+}
#endif /* CONFIG_NFS_V4_1 */
static __be32
@@ -621,7 +719,8 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
static __be32 process_op(uint32_t minorversion, int nop,
struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
- struct xdr_stream *xdr_out, void *resp, int* drc_status)
+ struct xdr_stream *xdr_out, void *resp,
+ struct cb_process_state *cps)
{
struct callback_op *op = &callback_ops[0];
unsigned int op_nr;
@@ -644,8 +743,8 @@ static __be32 process_op(uint32_t minorversion, int nop,
if (status)
goto encode_hdr;
- if (*drc_status) {
- status = *drc_status;
+ if (cps->drc_status) {
+ status = cps->drc_status;
goto encode_hdr;
}
@@ -653,16 +752,10 @@ static __be32 process_op(uint32_t minorversion, int nop,
if (maxlen > 0 && maxlen < PAGE_SIZE) {
status = op->decode_args(rqstp, xdr_in, argp);
if (likely(status == 0))
- status = op->process_op(argp, resp);
+ status = op->process_op(argp, resp, cps);
} else
status = htonl(NFS4ERR_RESOURCE);
- /* Only set by OP_CB_SEQUENCE processing */
- if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
- *drc_status = status;
- status = 0;
- }
-
encode_hdr:
res = encode_op_hdr(xdr_out, op_nr, status);
if (unlikely(res))
@@ -681,8 +774,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
struct cb_compound_hdr_arg hdr_arg = { 0 };
struct cb_compound_hdr_res hdr_res = { NULL };
struct xdr_stream xdr_in, xdr_out;
- __be32 *p;
- __be32 status, drc_status = 0;
+ __be32 *p, status;
+ struct cb_process_state cps = {
+ .drc_status = 0,
+ .clp = NULL,
+ };
unsigned int nops = 0;
dprintk("%s: start\n", __func__);
@@ -696,6 +792,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
if (status == __constant_htonl(NFS4ERR_RESOURCE))
return rpc_garbage_args;
+ if (hdr_arg.minorversion == 0) {
+ cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
+ if (!cps.clp)
+ return rpc_drop_reply;
+ } else
+ cps.svc_sid = bc_xprt_sid(rqstp);
+
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
@@ -703,7 +806,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(hdr_arg.minorversion, nops, rqstp,
- &xdr_in, argp, &xdr_out, resp, &drc_status);
+ &xdr_in, argp, &xdr_out, resp, &cps);
nops++;
}
@@ -716,6 +819,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
*hdr_res.status = status;
*hdr_res.nops = htonl(nops);
+ nfs4_cb_free_slot(cps.clp);
+ nfs_put_client(cps.clp);
dprintk("%s: done, status = %u\n", __func__, ntohl(status));
return rpc_success;
}
@@ -739,6 +844,12 @@ static struct callback_op callback_ops[] = {
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
},
#if defined(CONFIG_NFS_V4_1)
+ [OP_CB_LAYOUTRECALL] = {
+ .process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+ .decode_args =
+ (callback_decode_arg_t)decode_layoutrecall_args,
+ .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+ },
[OP_CB_SEQUENCE] = {
.process_op = (callback_process_op_t)nfs4_callback_sequence,
.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
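decode_layoutrecall_args() above reads a fixed header of four 32-bit XDR words (layout type, iomode, layoutchanged, recall type) before branching on the recall type. A self-contained sketch of that fixed-header decode using plain ntohl() instead of the kernel's xdr_stream helpers; the struct name and wire bytes are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct layoutrecall_hdr {
	uint32_t layout_type, iomode, changed, recall_type;
};

/* Decode one big-endian XDR word and advance the cursor. */
static const unsigned char *read_u32(const unsigned char *p, uint32_t *v)
{
	uint32_t raw;

	memcpy(&raw, p, 4);
	*v = ntohl(raw);
	return p + 4;
}

int main(void)
{
	/* wire bytes: type=1, iomode=2, changed=0, recall_type=1 (FILE) */
	const unsigned char wire[16] = {
		0,0,0,1, 0,0,0,2, 0,0,0,0, 0,0,0,1 };
	const unsigned char *p = wire;
	struct layoutrecall_hdr h;

	p = read_u32(p, &h.layout_type);
	p = read_u32(p, &h.iomode);
	p = read_u32(p, &h.changed);
	p = read_u32(p, &h.recall_type);
	printf("ltype %u iomode %u changed %u recall %u\n",
	       h.layout_type, h.iomode, h.changed, h.recall_type);
	return 0;
}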
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d4efc..192f2f86026 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -56,6 +56,30 @@ static DEFINE_SPINLOCK(nfs_client_lock);
static LIST_HEAD(nfs_client_list);
static LIST_HEAD(nfs_volume_list);
static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
+#ifdef CONFIG_NFS_V4
+static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to lookup the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+ int ret = 0;
+
+ if (clp->rpc_ops->version != 4 || minorversion != 0)
+ return ret;
+retry:
+ if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock(&nfs_client_lock);
+ ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident);
+ spin_unlock(&nfs_client_lock);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+}
+#endif /* CONFIG_NFS_V4 */
/*
* RPC cruft for NFS
@@ -144,7 +168,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_proto = cl_init->proto;
#ifdef CONFIG_NFS_V4
- INIT_LIST_HEAD(&clp->cl_delegations);
+ err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+ if (err)
+ goto error_cleanup;
+
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -170,21 +197,17 @@ error_0:
}
#ifdef CONFIG_NFS_V4
-/*
- * Clears/puts all minor version specific parts from an nfs_client struct
- * reverting it to minorversion 0.
- */
-static void nfs4_clear_client_minor_version(struct nfs_client *clp)
-{
#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp)) {
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp))
nfs4_destroy_session(clp->cl_session);
- clp->cl_session = NULL;
- }
-
- clp->cl_mvops = nfs_v4_minor_ops[0];
-#endif /* CONFIG_NFS_V4_1 */
}
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
/*
* Destroy the NFS4 callback service
@@ -199,17 +222,49 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
{
if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
nfs4_kill_renewd(clp);
- nfs4_clear_client_minor_version(clp);
+ nfs4_shutdown_session(clp);
nfs4_destroy_callback(clp);
if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
nfs_idmap_delete(clp);
rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
}
+
+/* idr_remove_all is not needed as all ids are removed by nfs_put_client */
+void nfs_cleanup_cb_ident_idr(void)
+{
+ idr_destroy(&cb_ident_idr);
+}
+
+/* nfs_client_lock held */
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+ if (clp->cl_cb_ident)
+ idr_remove(&cb_ident_idr, clp->cl_cb_ident);
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+ rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+}
+
#else
static void nfs4_shutdown_client(struct nfs_client *clp)
{
}
+
+void nfs_cleanup_cb_ident_idr(void)
+{
+}
+
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+}
+
#endif /* CONFIG_NFS_V4 */
/*
@@ -248,6 +303,7 @@ void nfs_put_client(struct nfs_client *clp)
if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) {
list_del(&clp->cl_share_link);
+ nfs_cb_idr_remove_locked(clp);
spin_unlock(&nfs_client_lock);
BUG_ON(!list_empty(&clp->cl_superblocks));
@@ -363,70 +419,28 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1,
return 0;
}
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
-{
- struct nfs_client *clp;
-
- spin_lock(&nfs_client_lock);
- list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
- struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
-
- /* Don't match clients that failed to initialise properly */
- if (!(clp->cl_cons_state == NFS_CS_READY ||
- clp->cl_cons_state == NFS_CS_SESSION_INITING))
- continue;
-
- /* Different NFS versions cannot share the same nfs_client */
- if (clp->rpc_ops->version != nfsversion)
- continue;
-
- /* Match only the IP address, not the port number */
- if (!nfs_sockaddr_match_ipaddr(addr, clap))
- continue;
-
- atomic_inc(&clp->cl_count);
- spin_unlock(&nfs_client_lock);
- return clp;
- }
- spin_unlock(&nfs_client_lock);
- return NULL;
-}
-
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
+/* Common match routine for v4.0 and v4.1 callback services */
+bool
+nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp,
+ u32 minorversion)
{
- struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr;
- u32 nfsvers = clp->rpc_ops->version;
+ struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
- spin_lock(&nfs_client_lock);
- list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) {
- struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+ /* Don't match clients that failed to initialise */
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
+ return false;
- /* Don't match clients that failed to initialise properly */
- if (clp->cl_cons_state != NFS_CS_READY)
- continue;
+ /* Match the version and minorversion */
+ if (clp->rpc_ops->version != 4 ||
+ clp->cl_minorversion != minorversion)
+ return false;
- /* Different NFS versions cannot share the same nfs_client */
- if (clp->rpc_ops->version != nfsvers)
- continue;
+ /* Match only the IP address, not the port number */
+ if (!nfs_sockaddr_match_ipaddr(addr, clap))
+ return false;
- /* Match only the IP address, not the port number */
- if (!nfs_sockaddr_match_ipaddr(sap, clap))
- continue;
-
- atomic_inc(&clp->cl_count);
- spin_unlock(&nfs_client_lock);
- return clp;
- }
- spin_unlock(&nfs_client_lock);
- return NULL;
+ return true;
}
/*
@@ -988,6 +1002,27 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
target->options = source->options;
}
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ spin_lock(&nfs_client_lock);
+ list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+ list_add_tail(&server->master_link, &nfs_volume_list);
+ spin_unlock(&nfs_client_lock);
+
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+ spin_lock(&nfs_client_lock);
+ list_del_rcu(&server->client_link);
+ list_del(&server->master_link);
+ spin_unlock(&nfs_client_lock);
+
+ synchronize_rcu();
+}
+
/*
* Allocate and initialise a server record
*/
@@ -1004,6 +1039,7 @@ static struct nfs_server *nfs_alloc_server(void)
/* Zero out the NFS state stuff */
INIT_LIST_HEAD(&server->client_link);
INIT_LIST_HEAD(&server->master_link);
+ INIT_LIST_HEAD(&server->delegations);
atomic_set(&server->active, 0);
@@ -1019,6 +1055,8 @@ static struct nfs_server *nfs_alloc_server(void)
return NULL;
}
+ pnfs_init_server(server);
+
return server;
}
@@ -1029,11 +1067,8 @@ void nfs_free_server(struct nfs_server *server)
{
dprintk("--> nfs_free_server()\n");
+ nfs_server_remove_lists(server);
unset_pnfs_layoutdriver(server);
- spin_lock(&nfs_client_lock);
- list_del(&server->client_link);
- list_del(&server->master_link);
- spin_unlock(&nfs_client_lock);
if (server->destroy != NULL)
server->destroy(server);
@@ -1108,11 +1143,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
- spin_lock(&nfs_client_lock);
- list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
- list_add_tail(&server->master_link, &nfs_volume_list);
- spin_unlock(&nfs_client_lock);
-
+ nfs_server_insert_lists(server);
server->mount_time = jiffies;
nfs_free_fattr(fattr);
return server;
@@ -1125,6 +1156,101 @@ error:
#ifdef CONFIG_NFS_V4
/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by IP address, protocol version, and minorversion
+ *
+ * Called from the pg_authenticate method. The callback identifier
+ * is not used as it has not been decoded.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_no_ident(const struct sockaddr *addr)
+{
+ struct nfs_client *clp;
+
+ spin_lock(&nfs_client_lock);
+ list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+ if (nfs4_cb_match_client(addr, clp, 0) == false)
+ continue;
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nfs_client_lock);
+ return clp;
+ }
+ spin_unlock(&nfs_client_lock);
+ return NULL;
+}
+
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(int cb_ident)
+{
+ struct nfs_client *clp;
+
+ spin_lock(&nfs_client_lock);
+ clp = idr_find(&cb_ident_idr, cb_ident);
+ if (clp)
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nfs_client_lock);
+ return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
+ * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
+ * can arrive before the callback sessionid is set. For CB_NULL calls,
+ * find a client by IP address, protocol version, and minorversion.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+ struct nfs4_sessionid *sid, int is_cb_compound)
+{
+ struct nfs_client *clp;
+
+ spin_lock(&nfs_client_lock);
+ list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+ if (nfs4_cb_match_client(addr, clp, 1) == false)
+ continue;
+
+ if (!nfs4_has_session(clp))
+ continue;
+
+ /* Match sessionid unless cb_null call */
+ if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
+ sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
+ continue;
+
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nfs_client_lock);
+ return clp;
+ }
+ spin_unlock(&nfs_client_lock);
+ return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+ struct nfs4_sessionid *sid, int is_cb_compound)
+{
+ return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
* Initialize the NFS4 callback service
*/
static int nfs4_init_callback(struct nfs_client *clp)
@@ -1342,11 +1468,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
server->namelen = NFS4_MAXNAMLEN;
- spin_lock(&nfs_client_lock);
- list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
- list_add_tail(&server->master_link, &nfs_volume_list);
- spin_unlock(&nfs_client_lock);
-
+ nfs_server_insert_lists(server);
server->mount_time = jiffies;
out:
nfs_free_fattr(fattr);
@@ -1551,11 +1673,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (error < 0)
goto out_free_server;
- spin_lock(&nfs_client_lock);
- list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
- list_add_tail(&server->master_link, &nfs_volume_list);
- spin_unlock(&nfs_client_lock);
-
+ nfs_server_insert_lists(server);
server->mount_time = jiffies;
nfs_free_fattr(fattr_fsinfo);
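On the client.c side, the v4.0 callback service now looks clients up by an IDR-allocated cb_ident, while nfs4_find_client_sessionid() matches v4.1 clients by session ID: any live session satisfies a CB_NULL ping, but CB_COMPOUND requires an exact memcmp over NFS4_MAX_SESSIONID_LEN bytes. A sketch of that comparison, assuming the 16-byte session ID RFC 5661 defines; everything else is invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SESSIONID_LEN 16	/* NFS4_MAX_SESSIONID_LEN in the kernel */

struct session { unsigned char sess_id[SESSIONID_LEN]; };

/* CB_NULL (is_cb_compound == false) matches any live session;
 * CB_COMPOUND requires the exact session the server created. */
static bool cb_session_match(const struct session *s,
			     const unsigned char *sid, bool is_cb_compound)
{
	if (!is_cb_compound)
		return true;
	return memcmp(s->sess_id, sid, SESSIONID_LEN) == 0;
}

int main(void)
{
	struct session s = { { 1, 2, 3 } };
	unsigned char probe[SESSIONID_LEN] = { 1, 2, 3 };

	printf("compound match: %d\n", cb_session_match(&s, probe, true));
	printf("null ping match: %d\n", cb_session_match(&s, probe, false));
	return 0;
}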
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc49be..364e4328f39 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -40,11 +40,23 @@ static void nfs_free_delegation(struct nfs_delegation *delegation)
call_rcu(&delegation->rcu, nfs_free_delegation_callback);
}
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if the inode has the indicated delegation, otherwise zero.
+ */
int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
struct nfs_delegation *delegation;
@@ -119,10 +131,15 @@ again:
return 0;
}
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
*/
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_openres *res)
{
struct nfs_delegation *delegation;
struct rpc_cred *oldcred = NULL;
@@ -175,38 +192,52 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
return inode;
}
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
- const nfs4_stateid *stateid,
- struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+ struct nfs_server *server)
{
struct nfs_delegation *delegation =
rcu_dereference_protected(nfsi->delegation,
- lockdep_is_held(&clp->cl_lock));
+ lockdep_is_held(&server->nfs_client->cl_lock));
if (delegation == NULL)
goto nomatch;
+
spin_lock(&delegation->lock);
- if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
- sizeof(delegation->stateid.data)) != 0)
- goto nomatch_unlock;
list_del_rcu(&delegation->super_list);
delegation->inode = NULL;
nfsi->delegation_state = 0;
rcu_assign_pointer(nfsi->delegation, NULL);
spin_unlock(&delegation->lock);
return delegation;
-nomatch_unlock:
- spin_unlock(&delegation->lock);
nomatch:
return NULL;
}
-/*
- * Set up a delegation on an inode
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+ struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_delegation *delegation;
+
+ spin_lock(&clp->cl_lock);
+ delegation = nfs_detach_delegation_locked(nfsi, server);
+ spin_unlock(&clp->cl_lock);
+ return delegation;
+}
+
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
*/
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation, *old_delegation;
struct nfs_delegation *freeme = NULL;
@@ -227,7 +258,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
spin_lock(&clp->cl_lock);
old_delegation = rcu_dereference_protected(nfsi->delegation,
- lockdep_is_held(&clp->cl_lock));
+ lockdep_is_held(&clp->cl_lock));
if (old_delegation != NULL) {
if (memcmp(&delegation->stateid, &old_delegation->stateid,
sizeof(old_delegation->stateid)) == 0 &&
@@ -246,9 +277,9 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
delegation = NULL;
goto out;
}
- freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+ freeme = nfs_detach_delegation_locked(nfsi, server);
}
- list_add_rcu(&delegation->super_list, &clp->cl_delegations);
+ list_add_rcu(&delegation->super_list, &server->delegations);
nfsi->delegation_state = delegation->type;
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
@@ -290,73 +321,85 @@ out:
return err;
}
-/*
- * Return all delegations that have been marked for return
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
*/
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
+ struct nfs_server *server;
struct inode *inode;
int err = 0;
restart:
rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
- if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
- continue;
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL)
- continue;
- spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
- spin_unlock(&clp->cl_lock);
- rcu_read_unlock();
- if (delegation != NULL) {
- filemap_flush(inode->i_mapping);
- err = __nfs_inode_return_delegation(inode, delegation, 0);
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+ if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+ &delegation->flags))
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ continue;
+ delegation = nfs_detach_delegation(NFS_I(inode),
+ server);
+ rcu_read_unlock();
+
+ if (delegation != NULL) {
+ filemap_flush(inode->i_mapping);
+ err = __nfs_inode_return_delegation(inode,
+ delegation, 0);
+ }
+ iput(inode);
+ if (!err)
+ goto restart;
+ set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ return err;
}
- iput(inode);
- if (!err)
- goto restart;
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
- return err;
}
rcu_read_unlock();
return 0;
}
-/*
- * This function returns the delegation without reclaiming opens
- * or protecting against delegation reclaims.
- * It is therefore really only safe to be called from
- * nfs4_clear_inode()
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
*/
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
if (rcu_access_pointer(nfsi->delegation) != NULL) {
- spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
- spin_unlock(&clp->cl_lock);
+ delegation = nfs_detach_delegation(nfsi, server);
if (delegation != NULL)
nfs_do_return_delegation(inode, delegation, 0);
}
}
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
int nfs_inode_return_delegation(struct inode *inode)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
int err = 0;
if (rcu_access_pointer(nfsi->delegation) != NULL) {
- spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
- spin_unlock(&clp->cl_lock);
+ delegation = nfs_detach_delegation(nfsi, server);
if (delegation != NULL) {
nfs_wb_all(inode);
err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -365,46 +408,61 @@ int nfs_inode_return_delegation(struct inode *inode)
return err;
}
-static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
+static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
{
+ struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
+
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}
-/*
- * Return all delegations associated to a super block
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
*/
void nfs_super_return_all_delegations(struct super_block *sb)
{
- struct nfs_client *clp = NFS_SB(sb)->nfs_client;
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *delegation;
if (clp == NULL)
return;
+
rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
spin_lock(&delegation->lock);
- if (delegation->inode != NULL && delegation->inode->i_sb == sb)
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
spin_unlock(&delegation->lock);
}
rcu_read_unlock();
+
if (nfs_client_return_marked_delegations(clp) != 0)
nfs4_schedule_state_manager(clp);
}
-static
-void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+ fmode_t flags)
{
struct nfs_delegation *delegation;
- rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
continue;
if (delegation->type & flags)
- nfs_mark_return_delegation(clp, delegation);
+ nfs_mark_return_delegation(delegation);
}
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+ fmode_t flags)
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_mark_return_all_delegation_types(server, flags);
rcu_read_unlock();
}
@@ -419,19 +477,32 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
nfs_client_mark_return_all_delegation_types(clp, flags);
nfs_delegation_run_state_manager(clp);
}
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}
-/*
- * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+/**
+ * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
*/
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
@@ -440,29 +511,43 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp)
nfs_client_mark_return_all_delegations(clp);
}
-static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
struct nfs_delegation *delegation;
- rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
continue;
- nfs_mark_return_delegation(clp, delegation);
+ nfs_mark_return_delegation(delegation);
}
- rcu_read_unlock();
}
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
- nfs_client_mark_return_unreferenced_delegations(clp);
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_mark_return_unreferenced_delegations(server);
+ rcu_read_unlock();
+
nfs_delegation_run_state_manager(clp);
}
-/*
- * Asynchronous delegation recall!
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information from CB_RECALL arguments
+ *
+ * Returns zero on success, or a negative errno value.
*/
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode,
+ const nfs4_stateid *stateid)
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_delegation *delegation;
@@ -474,22 +559,21 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
rcu_read_unlock();
return -ENOENT;
}
-
- nfs_mark_return_delegation(clp, delegation);
+ nfs_mark_return_delegation(delegation);
rcu_read_unlock();
+
nfs_delegation_run_state_manager(clp);
return 0;
}
-/*
- * Retrieve the inode associated with a delegation
- */
-struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+ const struct nfs_fh *fhandle)
{
struct nfs_delegation *delegation;
struct inode *res = NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -499,49 +583,121 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
if (res != NULL)
break;
}
+ return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns a pointer to the inode matching "fhandle", or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+ const struct nfs_fh *fhandle)
+{
+ struct nfs_server *server;
+ struct inode *res = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ res = nfs_delegation_find_inode_server(server, fhandle);
+ if (res != NULL)
+ break;
+ }
rcu_read_unlock();
return res;
}
-/*
- * Mark all delegations as needing to be reclaimed
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+ struct nfs_delegation *delegation;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+ set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
*/
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
- struct nfs_delegation *delegation;
+ struct nfs_server *server;
+
rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
- set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_delegation_mark_reclaim_server(server);
rcu_read_unlock();
}
-/*
- * Reap all unclaimed delegations after reboot recovery is done
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
*/
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
+ struct nfs_server *server;
struct inode *inode;
+
restart:
rcu_read_lock();
- list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
- if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
- continue;
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL)
- continue;
- spin_lock(&clp->cl_lock);
- delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
- spin_unlock(&clp->cl_lock);
- rcu_read_unlock();
- if (delegation != NULL)
- nfs_free_delegation(delegation);
- iput(inode);
- goto restart;
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+ if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags) == 0)
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ continue;
+ delegation = nfs_detach_delegation(NFS_I(inode),
+ server);
+ rcu_read_unlock();
+
+ if (delegation != NULL)
+ nfs_free_delegation(delegation);
+ iput(inode);
+ goto restart;
+ }
}
rcu_read_unlock();
}
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ int ret = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ if (!list_empty(&server->delegations)) {
+ ret = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ *
+ * Returns one and fills in "dst->data" if inode had a delegation,
+ * otherwise zero is returned.
+ */
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
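The delegation.c rework moves delegations from the single per-client cl_delegations list onto per-server lists, so every walker becomes a two-level loop: servers first, then each server's delegations. A toy model of the new shape using fixed arrays instead of RCU-protected lists (the real code's detach-under-lock and restart subtleties are deliberately omitted):

#include <stdbool.h>
#include <stdio.h>

#define NSERVERS 2
#define NDELEGS  3

struct delegation { bool need_return; };
struct server { struct delegation delegs[NDELEGS]; };
struct client { struct server servers[NSERVERS]; };

/* Two-level walk: iterate the client's servers, then each
 * server's delegations, returning those previously marked. */
static int return_marked(struct client *clp)
{
	for (int s = 0; s < NSERVERS; s++)
		for (int d = 0; d < NDELEGS; d++) {
			struct delegation *dl = &clp->servers[s].delegs[d];

			if (!dl->need_return)
				continue;
			dl->need_return = false;
			printf("returning delegation %d on server %d\n", d, s);
		}
	return 0;
}

int main(void)
{
	struct client c = { 0 };

	c.servers[1].delegs[2].need_return = true;
	return return_marked(&c);
}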
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304bda1..d9322e490c5 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@ void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
void nfs_handle_cb_pathdown(struct nfs_client *clp);
int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
void nfs_delegation_mark_reclaim(struct nfs_client *clp);
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d33da530097..abe4f0c8dc5 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,8 +33,8 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/sched.h>
-#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
+#include <linux/xattr.h>
#include "delegation.h"
#include "iostat.h"
@@ -125,9 +125,10 @@ const struct inode_operations nfs4_dir_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
- .getxattr = nfs4_getxattr,
- .setxattr = nfs4_setxattr,
- .listxattr = nfs4_listxattr,
+ .getxattr = generic_getxattr,
+ .setxattr = generic_setxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
};
#endif /* CONFIG_NFS_V4 */
@@ -172,7 +173,7 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};
-typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
typedef struct {
struct file *file;
struct page *page;
@@ -378,14 +379,14 @@ error:
return error;
}
-/* Fill in an entry based on the xdr code stored in desc->page */
-static
-int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
+static int xdr_decode(nfs_readdir_descriptor_t *desc,
+ struct nfs_entry *entry, struct xdr_stream *xdr)
{
- __be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus);
- if (IS_ERR(p))
- return PTR_ERR(p);
+ int error;
+ error = desc->decode(xdr, entry, desc->plus);
+ if (error)
+ return error;
entry->fattr->time_start = desc->timestamp;
entry->fattr->gencount = desc->gencount;
return 0;
@@ -459,25 +460,26 @@ out:
/* Perform conversion from xdr to cache array */
static
int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
- void *xdr_page, struct page *page, unsigned int buflen)
+ struct page **xdr_pages, struct page *page, unsigned int buflen)
{
struct xdr_stream stream;
- struct xdr_buf buf;
- __be32 *ptr = xdr_page;
+ struct xdr_buf buf = {
+ .pages = xdr_pages,
+ .page_len = buflen,
+ .buflen = buflen,
+ .len = buflen,
+ };
+ struct page *scratch;
struct nfs_cache_array *array;
unsigned int count = 0;
int status;
- buf.head->iov_base = xdr_page;
- buf.head->iov_len = buflen;
- buf.tail->iov_len = 0;
- buf.page_base = 0;
- buf.page_len = 0;
- buf.buflen = buf.head->iov_len;
- buf.len = buf.head->iov_len;
-
- xdr_init_decode(&stream, &buf, ptr);
+ scratch = alloc_page(GFP_KERNEL);
+ if (scratch == NULL)
+ return -ENOMEM;
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
do {
status = xdr_decode(desc, entry, &stream);
@@ -506,6 +508,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
} else
status = PTR_ERR(array);
}
+
+ put_page(scratch);
return status;
}
@@ -521,7 +525,6 @@ static
void nfs_readdir_free_large_page(void *ptr, struct page **pages,
unsigned int npages)
{
- vm_unmap_ram(ptr, npages);
nfs_readdir_free_pagearray(pages, npages);
}
@@ -530,9 +533,8 @@ void nfs_readdir_free_large_page(void *ptr, struct page **pages,
* to nfs_readdir_free_large_page
*/
static
-void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
+int nfs_readdir_large_page(struct page **pages, unsigned int npages)
{
- void *ptr;
unsigned int i;
for (i = 0; i < npages; i++) {
@@ -541,13 +543,11 @@ void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
goto out_freepages;
pages[i] = page;
}
+ return 0;
- ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
- if (!IS_ERR_OR_NULL(ptr))
- return ptr;
out_freepages:
nfs_readdir_free_pagearray(pages, i);
- return NULL;
+ return -ENOMEM;
}
static
@@ -566,6 +566,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
entry.eof = 0;
entry.fh = nfs_alloc_fhandle();
entry.fattr = nfs_alloc_fattr();
+ entry.server = NFS_SERVER(inode);
if (entry.fh == NULL || entry.fattr == NULL)
goto out;
@@ -577,8 +578,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
memset(array, 0, sizeof(struct nfs_cache_array));
array->eof_index = -1;
- pages_ptr = nfs_readdir_large_page(pages, array_size);
- if (!pages_ptr)
+ status = nfs_readdir_large_page(pages, array_size);
+ if (status < 0)
goto out_release_array;
do {
unsigned int pglen;
@@ -587,7 +588,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
if (status < 0)
break;
pglen = status;
- status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
+ status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
if (status < 0) {
if (status == -ENOSPC)
status = 0;
@@ -1221,7 +1222,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
goto out_unblock_sillyrename;
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
- res = (struct dentry *)inode;
+ res = ERR_CAST(inode);
if (IS_ERR(res))
goto out_unblock_sillyrename;
@@ -1355,8 +1356,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
if (nd->flags & LOOKUP_CREATE) {
attr.ia_mode = nd->intent.open.create_mode;
attr.ia_valid = ATTR_MODE;
- if (!IS_POSIXACL(dir))
- attr.ia_mode &= ~current_umask();
+ attr.ia_mode &= ~current_umask();
} else {
open_flags &= ~(O_EXCL | O_CREAT);
attr.ia_valid = 0;
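The dir.c hunks also change the readdir decoder contract: decode_dirent_t now returns an int error code (with the server reachable through struct nfs_entry) rather than an updated __be32 pointer, and page decoding goes through an xdr_stream with a scratch page instead of vm_map_ram(). A stand-alone sketch of the simplified contract, with a string array standing in for the XDR stream and -EAGAIN as an illustrative end-of-buffer sentinel:

#include <errno.h>
#include <stdio.h>

struct entry { const char *name; };
struct stream { const char **pos; };

typedef int (*decode_dirent_t)(struct stream *, struct entry *, int);

/* Toy decoder: yields the next name, or reports the buffer exhausted. */
static int demo_decode(struct stream *xdr, struct entry *e, int plus)
{
	(void)plus;		/* the "plus" readdir flavour is ignored here */
	if (*xdr->pos == NULL)
		return -EAGAIN;	/* illustrative sentinel: no more entries */
	e->name = *xdr->pos++;
	return 0;
}

int main(void)
{
	const char *names[] = { "a", "b", NULL };
	struct stream s = { names };
	struct entry e;
	decode_dirent_t decode = demo_decode;

	while (decode(&s, &e, 0) == 0)
		printf("%s\n", e.name);
	return 0;
}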
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 4e2d9b6b138..18696882f1c 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -238,7 +238,7 @@ int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t bu
return nfs_idmap_lookup_name(gid, "group", buf, buflen);
}
-#else /* CONFIG_NFS_USE_IDMAPPER not defined */
+#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 017daa3bed3..ce00b704452 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1410,9 +1410,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
void nfs4_evict_inode(struct inode *inode)
{
+ pnfs_destroy_layout(NFS_I(inode));
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
- pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
@@ -1619,6 +1619,7 @@ static void __exit exit_nfs_fs(void)
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
+ nfs_cleanup_cb_ident_idr();
unregister_nfs_fs();
nfs_fs_proc_exit();
nfsiod_stop();
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e6356b750b7..bfa3a34af80 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -128,9 +128,13 @@ extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern struct rpc_program nfs_program;
+extern void nfs_cleanup_cb_ident_idr(void);
extern void nfs_put_client(struct nfs_client *);
-extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
-extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
+extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
+extern struct nfs_client *nfs4_find_client_ident(int);
+extern struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
+ int);
extern struct nfs_server *nfs_create_server(
const struct nfs_parsed_mount_data *,
struct nfs_fh *);
@@ -185,17 +189,20 @@ extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
/* nfs2xdr.c */
-extern int nfs_stat_to_errno(int);
+extern int nfs_stat_to_errno(enum nfs_stat);
extern struct rpc_procinfo nfs_procedures[];
-extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs2_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
/* nfs3xdr.c */
extern struct rpc_procinfo nfs3_procedures[];
-extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs3_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs4_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
#endif
#ifdef CONFIG_NFS_V4_1
extern const u32 nfs41_maxread_overhead;
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 4f981f1f668..d4c2d6b7507 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -236,10 +236,8 @@ void nfs_umount(const struct nfs_mount_request *info)
.authflavor = RPC_AUTH_UNIX,
.flags = RPC_CLNT_CREATE_NOPING,
};
- struct mountres result;
struct rpc_message msg = {
.rpc_argp = info->dirpath,
- .rpc_resp = &result,
};
struct rpc_clnt *clnt;
int status;
@@ -248,7 +246,7 @@ void nfs_umount(const struct nfs_mount_request *info)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
clnt = rpc_create(&args);
- if (unlikely(IS_ERR(clnt)))
+ if (IS_ERR(clnt))
goto out_clnt_err;
dprintk("NFS: sending UMNT request for %s:%s\n",
@@ -280,29 +278,20 @@ out_call_err:
* XDR encode/decode functions for MOUNT
*/
-static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
{
const u32 pathname_len = strlen(pathname);
__be32 *p;
- if (unlikely(pathname_len > MNTPATHLEN))
- return -EIO;
-
- p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
- if (unlikely(p == NULL))
- return -EIO;
+ BUG_ON(pathname_len > MNTPATHLEN);
+ p = xdr_reserve_space(xdr, 4 + pathname_len);
xdr_encode_opaque(p, pathname, pathname_len);
-
- return 0;
}
-static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
- const char *dirpath)
+static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const char *dirpath)
{
- struct xdr_stream xdr;
-
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- return encode_mntdirpath(&xdr, dirpath);
+ encode_mntdirpath(xdr, dirpath);
}
/*
@@ -320,10 +309,10 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res)
u32 status;
__be32 *p;
- p = xdr_inline_decode(xdr, sizeof(status));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- status = ntohl(*p);
+ status = be32_to_cpup(p);
for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
if (mnt_errtbl[i].status == status) {
@@ -351,18 +340,16 @@ static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
return 0;
}
-static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
- struct mountres *res)
+static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct mountres *res)
{
- struct xdr_stream xdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
- status = decode_status(&xdr, res);
+ status = decode_status(xdr, res);
if (unlikely(status != 0 || res->errno != 0))
return status;
- return decode_fhandle(&xdr, res);
+ return decode_fhandle(xdr, res);
}
static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
@@ -371,10 +358,10 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
u32 status;
__be32 *p;
- p = xdr_inline_decode(xdr, sizeof(status));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- status = ntohl(*p);
+ status = be32_to_cpup(p);
for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
if (mnt3_errtbl[i].status == status) {
@@ -394,11 +381,11 @@ static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
u32 size;
__be32 *p;
- p = xdr_inline_decode(xdr, sizeof(size));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- size = ntohl(*p++);
+ size = be32_to_cpup(p);
if (size > NFS3_FHSIZE || size == 0)
return -EIO;
@@ -421,15 +408,15 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
if (*count == 0)
return 0;
- p = xdr_inline_decode(xdr, sizeof(entries));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- entries = ntohl(*p);
+ entries = be32_to_cpup(p);
dprintk("NFS: received %u auth flavors\n", entries);
if (entries > NFS_MAX_SECFLAVORS)
entries = NFS_MAX_SECFLAVORS;
- p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+ p = xdr_inline_decode(xdr, 4 * entries);
if (unlikely(p == NULL))
return -EIO;
@@ -437,7 +424,7 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
entries = *count;
for (i = 0; i < entries; i++) {
- flavors[i] = ntohl(*p++);
+ flavors[i] = be32_to_cpup(p++);
dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]);
}
*count = i;
@@ -445,30 +432,28 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
return 0;
}
-static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
- struct mountres *res)
+static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct mountres *res)
{
- struct xdr_stream xdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
- status = decode_fhs_status(&xdr, res);
+ status = decode_fhs_status(xdr, res);
if (unlikely(status != 0 || res->errno != 0))
return status;
- status = decode_fhandle3(&xdr, res);
+ status = decode_fhandle3(xdr, res);
if (unlikely(status != 0)) {
res->errno = -EBADHANDLE;
return 0;
}
- return decode_auth_flavors(&xdr, res);
+ return decode_auth_flavors(xdr, res);
}
static struct rpc_procinfo mnt_procedures[] = {
[MOUNTPROC_MNT] = {
.p_proc = MOUNTPROC_MNT,
- .p_encode = (kxdrproc_t)mnt_enc_dirpath,
- .p_decode = (kxdrproc_t)mnt_dec_mountres,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres,
.p_arglen = MNT_enc_dirpath_sz,
.p_replen = MNT_dec_mountres_sz,
.p_statidx = MOUNTPROC_MNT,
@@ -476,7 +461,7 @@ static struct rpc_procinfo mnt_procedures[] = {
},
[MOUNTPROC_UMNT] = {
.p_proc = MOUNTPROC_UMNT,
- .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
.p_arglen = MNT_enc_dirpath_sz,
.p_statidx = MOUNTPROC_UMNT,
.p_name = "UMOUNT",
@@ -486,8 +471,8 @@ static struct rpc_procinfo mnt_procedures[] = {
static struct rpc_procinfo mnt3_procedures[] = {
[MOUNTPROC3_MNT] = {
.p_proc = MOUNTPROC3_MNT,
- .p_encode = (kxdrproc_t)mnt_enc_dirpath,
- .p_decode = (kxdrproc_t)mnt_dec_mountres3,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3,
.p_arglen = MNT_enc_dirpath_sz,
.p_replen = MNT_dec_mountres3_sz,
.p_statidx = MOUNTPROC3_MNT,
@@ -495,7 +480,7 @@ static struct rpc_procinfo mnt3_procedures[] = {
},
[MOUNTPROC3_UMNT] = {
.p_proc = MOUNTPROC3_UMNT,
- .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
.p_arglen = MNT_enc_dirpath_sz,
.p_statidx = MOUNTPROC3_UMNT,
.p_name = "UMOUNT",
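
Throughout fs/nfs/mount_clnt.c the decoders above switch from ntohl(*p) to be32_to_cpup(p) and spell XDR word sizes as a literal 4 bytes rather than sizeof(). Both spellings convert one big-endian wire word to host order; a userspace analogue of the helper (the kernel's version lives in its byteorder headers):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Userspace stand-in for the kernel's be32_to_cpup(). */
static uint32_t be32_to_cpup_analogue(const uint32_t *p)
{
	return ntohl(*p);	/* swaps bytes only on little-endian hosts */
}

int main(void)
{
	uint32_t wire = htonl(70);	/* e.g. NFSERR_STALE on the wire */

	printf("decoded status: %u\n", be32_to_cpup_analogue(&wire));
	return 0;
}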
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5914a1911c9..792cb13a430 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,584 +61,1008 @@
#define NFS_readdirres_sz (1)
#define NFS_statfsres_sz (1+NFS_info_sz)
+
/*
- * Common NFS XDR functions as inlines
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
*/
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int bufsize)
{
- memcpy(p, fhandle->data, NFS2_FHSIZE);
- return p + XDR_QUADLEN(NFS2_FHSIZE);
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
+ unsigned int replen;
+
+ replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle)
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
- /* NFSv2 handles have a fixed length */
- fhandle->size = NFS2_FHSIZE;
- memcpy(fhandle->data, p, NFS2_FHSIZE);
- return p + XDR_QUADLEN(NFS2_FHSIZE);
+ dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NFSv2 basic data types
+ *
+ * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+/*
+ * typedef opaque nfsdata<>;
+ */
+static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
+{
+ u32 recvd, count;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, count);
+ result->eof = 0; /* NFSv2 does not pass EOF flag on the wire. */
+ result->count = count;
+ return count;
+out_cheating:
+ dprintk("NFS: server cheating in read result: "
+ "count %u > recvd %u\n", count, recvd);
+ count = recvd;
+ goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * enum stat {
+ * NFS_OK = 0,
+ * NFSERR_PERM = 1,
+ * NFSERR_NOENT = 2,
+ * NFSERR_IO = 5,
+ * NFSERR_NXIO = 6,
+ * NFSERR_ACCES = 13,
+ * NFSERR_EXIST = 17,
+ * NFSERR_NODEV = 19,
+ * NFSERR_NOTDIR = 20,
+ * NFSERR_ISDIR = 21,
+ * NFSERR_FBIG = 27,
+ * NFSERR_NOSPC = 28,
+ * NFSERR_ROFS = 30,
+ * NFSERR_NAMETOOLONG = 63,
+ * NFSERR_NOTEMPTY = 66,
+ * NFSERR_DQUOT = 69,
+ * NFSERR_STALE = 70,
+ * NFSERR_WFLUSH = 99
+ * };
+ */
+static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *status = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static inline __be32*
-xdr_encode_time(__be32 *p, struct timespec *timep)
+/*
+ * 2.3.2. ftype
+ *
+ * enum ftype {
+ * NFNON = 0,
+ * NFREG = 1,
+ * NFDIR = 2,
+ * NFBLK = 3,
+ * NFCHR = 4,
+ * NFLNK = 5
+ * };
+ *
+ */
+static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
{
- *p++ = htonl(timep->tv_sec);
- /* Convert nanoseconds into microseconds */
- *p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
+ *type = be32_to_cpup(p++);
+ if (unlikely(*type > NF2FIFO))
+ *type = NFBAD;
return p;
}
-static inline __be32*
-xdr_encode_current_server_time(__be32 *p, struct timespec *timep)
+/*
+ * 2.3.3. fhandle
+ *
+ * typedef opaque fhandle[FHSIZE];
+ */
+static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
- /*
- * Passing the invalid value useconds=1000000 is a
- * Sun convention for "set to current server time".
- * It's needed to make permissions checks for the
- * "touch" program across v2 mounts to Solaris and
- * Irix boxes work correctly. See description of
- * sattr in section 6.1 of "NFS Illustrated" by
- * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
- */
- *p++ = htonl(timep->tv_sec);
- *p++ = htonl(1000000);
+ __be32 *p;
+
+ BUG_ON(fh->size != NFS2_FHSIZE);
+ p = xdr_reserve_space(xdr, NFS2_FHSIZE);
+ memcpy(p, fh->data, NFS2_FHSIZE);
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.4. timeval
+ *
+ * struct timeval {
+ * unsigned int seconds;
+ * unsigned int useconds;
+ * };
+ */
+static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
+{
+ *p++ = cpu_to_be32(timep->tv_sec);
+ if (timep->tv_nsec != 0)
+ *p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
+ else
+ *p++ = cpu_to_be32(0);
return p;
}
-static inline __be32*
-xdr_decode_time(__be32 *p, struct timespec *timep)
+/*
+ * Passing the invalid value useconds=1000000 is a Sun convention for
+ * "set to current server time". It's needed to make permissions checks
+ * for the "touch" program across v2 mounts to Solaris and Irix servers
+ * work correctly. See description of sattr in section 6.1 of "NFS
+ * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
+ */
+static __be32 *xdr_encode_current_server_time(__be32 *p,
+ const struct timespec *timep)
{
- timep->tv_sec = ntohl(*p++);
- /* Convert microseconds into nanoseconds */
- timep->tv_nsec = ntohl(*p++) * 1000;
+ *p++ = cpu_to_be32(timep->tv_sec);
+ *p++ = cpu_to_be32(1000000);
return p;
}
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
+static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
+{
+ timep->tv_sec = be32_to_cpup(p++);
+ timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
+ return p;
+}
+
+/*
+ * 2.3.5. fattr
+ *
+ * struct fattr {
+ * ftype type;
+ * unsigned int mode;
+ * unsigned int nlink;
+ * unsigned int uid;
+ * unsigned int gid;
+ * unsigned int size;
+ * unsigned int blocksize;
+ * unsigned int rdev;
+ * unsigned int blocks;
+ * unsigned int fsid;
+ * unsigned int fileid;
+ * timeval atime;
+ * timeval mtime;
+ * timeval ctime;
+ * };
+ *
+ */
+static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
u32 rdev, type;
- type = ntohl(*p++);
- fattr->mode = ntohl(*p++);
- fattr->nlink = ntohl(*p++);
- fattr->uid = ntohl(*p++);
- fattr->gid = ntohl(*p++);
- fattr->size = ntohl(*p++);
- fattr->du.nfs2.blocksize = ntohl(*p++);
- rdev = ntohl(*p++);
- fattr->du.nfs2.blocks = ntohl(*p++);
- fattr->fsid.major = ntohl(*p++);
- fattr->fsid.minor = 0;
- fattr->fileid = ntohl(*p++);
- p = xdr_decode_time(p, &fattr->atime);
- p = xdr_decode_time(p, &fattr->mtime);
- p = xdr_decode_time(p, &fattr->ctime);
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
fattr->valid |= NFS_ATTR_FATTR_V2;
+
+ p = xdr_decode_ftype(p, &type);
+
+ fattr->mode = be32_to_cpup(p++);
+ fattr->nlink = be32_to_cpup(p++);
+ fattr->uid = be32_to_cpup(p++);
+ fattr->gid = be32_to_cpup(p++);
+ fattr->size = be32_to_cpup(p++);
+ fattr->du.nfs2.blocksize = be32_to_cpup(p++);
+
+ rdev = be32_to_cpup(p++);
fattr->rdev = new_decode_dev(rdev);
- if (type == NFCHR && rdev == NFS2_FIFO_DEV) {
+ if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
fattr->rdev = 0;
}
+
+ fattr->du.nfs2.blocks = be32_to_cpup(p++);
+ fattr->fsid.major = be32_to_cpup(p++);
+ fattr->fsid.minor = 0;
+ fattr->fileid = be32_to_cpup(p++);
+
+ p = xdr_decode_time(p, &fattr->atime);
+ p = xdr_decode_time(p, &fattr->mtime);
+ xdr_decode_time(p, &fattr->ctime);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.6. sattr
+ *
+ * struct sattr {
+ * unsigned int mode;
+ * unsigned int uid;
+ * unsigned int gid;
+ * unsigned int size;
+ * timeval atime;
+ * timeval mtime;
+ * };
+ */
+
+#define NFS2_SATTR_NOT_SET (0xffffffff)
+
+static __be32 *xdr_time_not_set(__be32 *p)
+{
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
return p;
}
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
{
- const __be32 not_set = __constant_htonl(0xFFFFFFFF);
+ __be32 *p;
- *p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
- *p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
- *p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
- *p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
+ p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
- if (attr->ia_valid & ATTR_ATIME_SET) {
+ if (attr->ia_valid & ATTR_MODE)
+ *p++ = cpu_to_be32(attr->ia_mode);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_UID)
+ *p++ = cpu_to_be32(attr->ia_uid);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_GID)
+ *p++ = cpu_to_be32(attr->ia_gid);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_SIZE)
+ *p++ = cpu_to_be32((u32)attr->ia_size);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+
+ if (attr->ia_valid & ATTR_ATIME_SET)
p = xdr_encode_time(p, &attr->ia_atime);
- } else if (attr->ia_valid & ATTR_ATIME) {
+ else if (attr->ia_valid & ATTR_ATIME)
p = xdr_encode_current_server_time(p, &attr->ia_atime);
- } else {
- *p++ = not_set;
- *p++ = not_set;
- }
-
- if (attr->ia_valid & ATTR_MTIME_SET) {
- p = xdr_encode_time(p, &attr->ia_mtime);
- } else if (attr->ia_valid & ATTR_MTIME) {
- p = xdr_encode_current_server_time(p, &attr->ia_mtime);
- } else {
- *p++ = not_set;
- *p++ = not_set;
- }
- return p;
+ else
+ p = xdr_time_not_set(p);
+ if (attr->ia_valid & ATTR_MTIME_SET)
+ xdr_encode_time(p, &attr->ia_mtime);
+ else if (attr->ia_valid & ATTR_MTIME)
+ xdr_encode_current_server_time(p, &attr->ia_mtime);
+ else
+ xdr_time_not_set(p);
}
/*
- * NFS encode functions
+ * 2.3.7. filename
+ *
+ * typedef string filename<MAXNAMLEN>;
*/
+static void encode_filename(struct xdr_stream *xdr,
+ const char *name, u32 length)
+{
+ __be32 *p;
+
+ BUG_ON(length > NFS2_MAXNAMLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
+}
+
+static int decode_filename_inline(struct xdr_stream *xdr,
+ const char **name, u32 *length)
+{
+ __be32 *p;
+ u32 count;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (count > NFS3_MAXNAMLEN)
+ goto out_nametoolong;
+ p = xdr_inline_decode(xdr, count);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *name = (const char *)p;
+ *length = count;
+ return 0;
+out_nametoolong:
+ dprintk("NFS: returned filename too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
/*
- * Encode file handle argument
- * GETATTR, READLINK, STATFS
+ * 2.3.8. path
+ *
+ * typedef string path<MAXPATHLEN>;
*/
-static int
-nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
+static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
{
- p = xdr_encode_fhandle(p, fh);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ __be32 *p;
+
+ BUG_ON(length > NFS2_MAXPATHLEN);
+ p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(length);
+ xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_path(struct xdr_stream *xdr)
+{
+ u32 length, recvd;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p);
+ if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
+ goto out_size;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(length > recvd))
+ goto out_cheating;
+
+ xdr_read_pages(xdr, length);
+ xdr_terminate_string(xdr->buf, length);
return 0;
+out_size:
+ dprintk("NFS: returned pathname too long: %u\n", length);
+ return -ENAMETOOLONG;
+out_cheating:
+ dprintk("NFS: server cheating in pathname result: "
+ "length %u > received %u\n", length, recvd);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
/*
- * Encode SETATTR arguments
+ * 2.3.9. attrstat
+ *
+ * union attrstat switch (stat status) {
+ * case NFS_OK:
+ * fattr attributes;
+ * default:
+ * void;
+ * };
*/
-static int
-nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_sattr(p, args->sattr);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_fattr(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
/*
- * Encode directory ops argument
- * LOOKUP, RMDIR
+ * 2.3.10. diropargs
+ *
+ * struct diropargs {
+ * fhandle dir;
+ * filename name;
+ * };
*/
-static int
-nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
+static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
+ const char *name, u32 length)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_fhandle(xdr, fh);
+ encode_filename(xdr, name, length);
}
/*
- * Encode REMOVE argument
+ * 2.3.11. diropres
+ *
+ * union diropres switch (stat status) {
+ * case NFS_OK:
+ * struct {
+ * fhandle file;
+ * fattr attributes;
+ * } diropok;
+ * default:
+ * void;
+ * };
*/
-static int
-nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name.name, args->name.len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ int error;
+
+ error = decode_fhandle(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_fattr(xdr, result->fattr);
+out:
+ return error;
+}
+
+static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_diropok(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
+
/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
+ * NFSv2 XDR encode functions
+ *
+ * NFSv2 argument types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
*/
-static int
-nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+
+static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_fh *fh)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
- u32 offset = (u32)args->offset;
+ encode_fhandle(xdr, fh);
+}
+
+/*
+ * 2.2.3. sattrargs
+ *
+ * struct sattrargs {
+ * fhandle file;
+ * sattr attributes;
+ * };
+ */
+static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_sattrargs *args)
+{
+ encode_fhandle(xdr, args->fh);
+ encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_diropargs *args)
+{
+ encode_diropargs(xdr, args->fh, args->name, args->len);
+}
+
+static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readlinkargs *args)
+{
+ encode_fhandle(xdr, args->fh);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->pglen, NFS_readlinkres_sz);
+}
+
+/*
+ * 2.2.7. readargs
+ *
+ * struct readargs {
+ * fhandle file;
+ * unsigned offset;
+ * unsigned count;
+ * unsigned totalcount;
+ * };
+ */
+static void encode_readargs(struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ u32 offset = args->offset;
u32 count = args->count;
+ __be32 *p;
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(offset);
- *p++ = htonl(count);
- *p++ = htonl(count);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ encode_fhandle(xdr, args->fh);
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
- args->pages, args->pgbase, count);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(count);
+ *p = cpu_to_be32(count);
+}
+
+static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ encode_readargs(xdr, args);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->count, NFS_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
- return 0;
}
/*
- * Decode READ reply
+ * 2.2.9. writeargs
+ *
+ * struct writeargs {
+ * fhandle file;
+ * unsigned beginoffset;
+ * unsigned offset;
+ * unsigned totalcount;
+ * nfsdata data;
+ * };
*/
-static int
-nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
+static void encode_writeargs(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
{
- struct kvec *iov = req->rq_rcv_buf.head;
- size_t hdrlen;
- u32 count, recvd;
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
- p = xdr_decode_fattr(p, res->fattr);
-
- count = ntohl(*p++);
- res->eof = 0;
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READ reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READ header is short. iovec will be shifted.\n");
- xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
- }
+ u32 offset = args->offset;
+ u32 count = args->count;
+ __be32 *p;
- recvd = req->rq_rcv_buf.len - hdrlen;
- if (count > recvd) {
- dprintk("NFS: server cheating in read reply: "
- "count %u > recvd %u\n", count, recvd);
- count = recvd;
- }
+ encode_fhandle(xdr, args->fh);
- dprintk("RPC: readres OK count %u\n", count);
- if (count < res->count)
- res->count = count;
+ p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(count);
- return count;
+ /* nfsdata */
+ *p = cpu_to_be32(count);
+ xdr_write_pages(xdr, args->pages, args->pgbase, count);
}
+static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ encode_writeargs(xdr, args);
+ xdr->buf->flags |= XDRBUF_WRITE;
+}
/*
- * Write arguments. Splice the buffer to be written into the iovec.
+ * 2.2.10. createargs
+ *
+ * struct createargs {
+ * diropargs where;
+ * sattr attributes;
+ * };
*/
-static int
-nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_createargs *args)
{
- struct xdr_buf *sndbuf = &req->rq_snd_buf;
- u32 offset = (u32)args->offset;
- u32 count = args->count;
-
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(offset);
- *p++ = htonl(offset);
- *p++ = htonl(count);
- *p++ = htonl(count);
- sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
+ encode_diropargs(xdr, args->fh, args->name, args->len);
+ encode_sattr(xdr, args->sattr);
+}
- /* Copy the page array */
- xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
- sndbuf->flags |= XDRBUF_WRITE;
- return 0;
+static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
+{
+ encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
}
/*
- * Encode create arguments
- * CREATE, MKDIR
+ * 2.2.12. renameargs
+ *
+ * struct renameargs {
+ * diropargs from;
+ * diropargs to;
+ * };
*/
-static int
-nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
+static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
- p = xdr_encode_sattr(p, args->sattr);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ const struct qstr *old = args->old_name;
+ const struct qstr *new = args->new_name;
+
+ encode_diropargs(xdr, args->old_dir, old->name, old->len);
+ encode_diropargs(xdr, args->new_dir, new->name, new->len);
}
/*
- * Encode RENAME arguments
+ * 2.2.13. linkargs
+ *
+ * struct linkargs {
+ * fhandle from;
+ * diropargs to;
+ * };
*/
-static int
-nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
+static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_linkargs *args)
{
- p = xdr_encode_fhandle(p, args->old_dir);
- p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
- p = xdr_encode_fhandle(p, args->new_dir);
- p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_fhandle(xdr, args->fromfh);
+ encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
}
/*
- * Encode LINK arguments
+ * 2.2.14. symlinkargs
+ *
+ * struct symlinkargs {
+ * diropargs from;
+ * path to;
+ * sattr attributes;
+ * };
*/
-static int
-nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args)
+static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_symlinkargs *args)
{
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_fhandle(p, args->tofh);
- p = xdr_encode_array(p, args->toname, args->tolen);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+ encode_path(xdr, args->pages, args->pathlen);
+ encode_sattr(xdr, args->sattr);
}
/*
- * Encode SYMLINK arguments
+ * 2.2.17. readdirargs
+ *
+ * struct readdirargs {
+ * fhandle dir;
+ * nfscookie cookie;
+ * unsigned count;
+ * };
*/
-static int
-nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args)
+static void encode_readdirargs(struct xdr_stream *xdr,
+ const struct nfs_readdirargs *args)
{
- struct xdr_buf *sndbuf = &req->rq_snd_buf;
- size_t pad;
+ __be32 *p;
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_array(p, args->fromname, args->fromlen);
- *p++ = htonl(args->pathlen);
- sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
+ encode_fhandle(xdr, args->fh);
- xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen);
+ p = xdr_reserve_space(xdr, 4 + 4);
+ *p++ = cpu_to_be32(args->cookie);
+ *p = cpu_to_be32(args->count);
+}
- /*
- * xdr_encode_pages may have added a few bytes to ensure the
- * pathname ends on a 4-byte boundary. Start encoding the
- * attributes after the pad bytes.
- */
- pad = sndbuf->tail->iov_len;
- if (pad > 0)
- p++;
- p = xdr_encode_sattr(p, args->sattr);
- sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
- return 0;
+static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readdirargs *args)
+{
+ encode_readdirargs(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS_readdirres_sz);
}
/*
- * Encode arguments to readdir call
+ * NFSv2 XDR decode functions
+ *
+ * NFSv2 result types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
*/
-static int
-nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
+
+static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
+ void *__unused)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
- u32 count = args->count;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(args->cookie);
- *p++ = htonl(count); /* see above */
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ return decode_attrstat(xdr, result);
+}
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
- return 0;
+static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_diropok *result)
+{
+ return decode_diropres(xdr, result);
}
/*
- * Decode the result of a readdir call.
- * We're not really decoding anymore, we just leave the buffer untouched
- * and only check that it is syntactically correct.
- * The real decoding happens in nfs_decode_entry below, called directly
- * from nfs_readdir for each entry.
+ * 2.2.6. readlinkres
+ *
+ * union readlinkres switch (stat status) {
+ * case NFS_OK:
+ * path data;
+ * default:
+ * void;
+ * };
*/
-static int
-nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
+static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
+ struct xdr_stream *xdr, void *__unused)
{
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- struct page **page;
- size_t hdrlen;
- unsigned int pglen, recvd;
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
-
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READDIR reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
- xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
- }
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_path(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
- pglen = rcvbuf->page_len;
- recvd = rcvbuf->len - hdrlen;
- if (pglen > recvd)
- pglen = recvd;
- page = rcvbuf->pages;
- return pglen;
+/*
+ * 2.2.7. readres
+ *
+ * union readres switch (stat status) {
+ * case NFS_OK:
+ * fattr attributes;
+ * nfsdata data;
+ * default:
+ * void;
+ * };
+ */
+static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_fattr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_nfsdata(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeres *result)
{
- dprintk("nfs: %s: prematurely hit end of receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
+ /* All NFSv2 writes are "file sync" writes */
+ result->verf->committed = NFS_FILE_SYNC;
+ return decode_attrstat(xdr, result->fattr);
}
-__be32 *
-nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+/**
+ * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
+ * the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 2.2.17. entry
+ *
+ * struct entry {
+ * unsigned fileid;
+ * filename name;
+ * nfscookie cookie;
+ * entry *nextentry;
+ * };
+ */
+int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
{
__be32 *p;
+ int error;
+
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
+ if (unlikely(p == NULL))
goto out_overflow;
- if (!ntohl(*p++)) {
+ if (*p++ == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
+ if (unlikely(p == NULL))
goto out_overflow;
- if (!ntohl(*p++))
- return ERR_PTR(-EAGAIN);
+ if (*p++ == xdr_zero)
+ return -EAGAIN;
entry->eof = 1;
- return ERR_PTR(-EBADCOOKIE);
+ return -EBADCOOKIE;
}
- p = xdr_inline_decode(xdr, 8);
- if (unlikely(!p))
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
goto out_overflow;
+ entry->ino = be32_to_cpup(p);
- entry->ino = ntohl(*p++);
- entry->len = ntohl(*p++);
+ error = decode_filename_inline(xdr, &entry->name, &entry->len);
+ if (unlikely(error))
+ return error;
- p = xdr_inline_decode(xdr, entry->len + 4);
- if (unlikely(!p))
+ /*
+ * The type (size and byte order) of nfscookie isn't defined in
+ * RFC 1094. This implementation assumes that it's an XDR uint32.
+ */
+ entry->prev_cookie = entry->cookie;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
goto out_overflow;
- entry->name = (const char *) p;
- p += XDR_QUADLEN(entry->len);
- entry->prev_cookie = entry->cookie;
- entry->cookie = ntohl(*p++);
+ entry->cookie = be32_to_cpup(p);
entry->d_type = DT_UNKNOWN;
- p = xdr_inline_peek(xdr, 8);
- if (p != NULL)
- entry->eof = !p[0] && p[1];
- else
- entry->eof = 0;
-
- return p;
+ return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EAGAIN);
-}
-
-/*
- * NFS XDR decode functions
- */
-/*
- * Decode simple status reply
- */
-static int
-nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
- int status;
-
- if ((status = ntohl(*p++)) != 0)
- status = nfs_stat_to_errno(status);
- return status;
+ return -EAGAIN;
}
/*
- * Decode attrstat reply
- * GETATTR, SETATTR, WRITE
- */
-static int
-nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
-{
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
- xdr_decode_fattr(p, fattr);
- return 0;
-}
-
-/*
- * Decode diropres reply
- * LOOKUP, CREATE, MKDIR
+ * 2.2.17. readdirres
+ *
+ * union readdirres switch (stat status) {
+ * case NFS_OK:
+ * struct {
+ * entry *entries;
+ * bool eof;
+ * } readdirok;
+ * default:
+ * void;
+ * };
+ *
+ * Read the directory contents into the page cache, but don't
+ * touch them. The actual decoding is done by nfs2_decode_dirent()
+ * during subsequent nfs_readdir() calls.
*/
-static int
-nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
+static int decode_readdirok(struct xdr_stream *xdr)
{
- int status;
+ u32 recvd, pglen;
+ size_t hdrlen;
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
- p = xdr_decode_fhandle(p, res->fh);
- xdr_decode_fattr(p, res->fattr);
- return 0;
+ pglen = xdr->buf->page_len;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(pglen > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, pglen);
+ return pglen;
+out_cheating:
+ dprintk("NFS: server cheating in readdir result: "
+ "pglen %u > recvd %u\n", pglen, recvd);
+ pglen = recvd;
+ goto out;
}
-/*
- * Encode READLINK args
- */
-static int
-nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
+static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
+ struct xdr_stream *xdr, void *__unused)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
-
- p = xdr_encode_fhandle(p, args->fh);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
- return 0;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_readdirok(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode READLINK reply
+ * 2.2.18. statfsres
+ *
+ * union statfsres (stat status) {
+ * case NFS_OK:
+ * struct {
+ * unsigned tsize;
+ * unsigned bsize;
+ * unsigned blocks;
+ * unsigned bfree;
+ * unsigned bavail;
+ * } info;
+ * default:
+ * void;
+ * };
*/
-static int
-nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
+static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
{
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- size_t hdrlen;
- u32 len, recvd;
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
- /* Convert length of symlink */
- len = ntohl(*p++);
- if (len >= rcvbuf->page_len) {
- dprintk("nfs: server returned giant symlink!\n");
- return -ENAMETOOLONG;
- }
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READLINK reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
- xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
- }
- recvd = req->rq_rcv_buf.len - hdrlen;
- if (recvd < len) {
- dprintk("NFS: server cheating in readlink reply: "
- "count %u > recvd %u\n", len, recvd);
- return -EIO;
- }
+ __be32 *p;
- xdr_terminate_string(rcvbuf, len);
+ p = xdr_inline_decode(xdr, NFS_info_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->tsize = be32_to_cpup(p++);
+ result->bsize = be32_to_cpup(p++);
+ result->blocks = be32_to_cpup(p++);
+ result->bfree = be32_to_cpup(p++);
+ result->bavail = be32_to_cpup(p);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-/*
- * Decode WRITE reply
- */
-static int
-nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs2_fsstat *result)
{
- res->verf->committed = NFS_FILE_SYNC;
- return nfs_xdr_attrstat(req, p, res->fattr);
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_info(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
-/*
- * Decode STATFS reply
- */
-static int
-nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
-{
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
-
- res->tsize = ntohl(*p++);
- res->bsize = ntohl(*p++);
- res->blocks = ntohl(*p++);
- res->bfree = ntohl(*p++);
- res->bavail = ntohl(*p++);
- return 0;
-}
/*
* We need to translate between nfs status return values and
* the local errno values which may not be the same.
*/
-static struct {
+static const struct {
int stat;
int errno;
} nfs_errtbl[] = {
@@ -678,28 +1102,30 @@ static struct {
{ -1, -EIO }
};
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
*/
-int
-nfs_stat_to_errno(int stat)
+int nfs_stat_to_errno(enum nfs_stat status)
{
int i;
for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == stat)
+ if (nfs_errtbl[i].stat == (int)status)
return nfs_errtbl[i].errno;
}
- dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
+ dprintk("NFS: Unrecognized nfs status value: %u\n", status);
return nfs_errtbl[i].errno;
}
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
- .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \
- .p_decode = (kxdrproc_t) nfs_xdr_##restype, \
+ .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \
+ .p_decode = (kxdrdproc_t)nfs2_xdr_dec_##restype, \
.p_arglen = NFS_##argtype##_sz, \
.p_replen = NFS_##restype##_sz, \
.p_timer = timer, \
@@ -707,21 +1133,21 @@ nfs_stat_to_errno(int stat)
.p_name = #proc, \
}
struct rpc_procinfo nfs_procedures[] = {
- PROC(GETATTR, fhandle, attrstat, 1),
- PROC(SETATTR, sattrargs, attrstat, 0),
- PROC(LOOKUP, diropargs, diropres, 2),
- PROC(READLINK, readlinkargs, readlinkres, 3),
- PROC(READ, readargs, readres, 3),
- PROC(WRITE, writeargs, writeres, 4),
- PROC(CREATE, createargs, diropres, 0),
- PROC(REMOVE, removeargs, stat, 0),
- PROC(RENAME, renameargs, stat, 0),
- PROC(LINK, linkargs, stat, 0),
- PROC(SYMLINK, symlinkargs, stat, 0),
- PROC(MKDIR, createargs, diropres, 0),
- PROC(RMDIR, diropargs, stat, 0),
- PROC(READDIR, readdirargs, readdirres, 3),
- PROC(STATFS, fhandle, statfsres, 0),
+ PROC(GETATTR, fhandle, attrstat, 1),
+ PROC(SETATTR, sattrargs, attrstat, 0),
+ PROC(LOOKUP, diropargs, diropres, 2),
+ PROC(READLINK, readlinkargs, readlinkres, 3),
+ PROC(READ, readargs, readres, 3),
+ PROC(WRITE, writeargs, writeres, 4),
+ PROC(CREATE, createargs, diropres, 0),
+ PROC(REMOVE, removeargs, stat, 0),
+ PROC(RENAME, renameargs, stat, 0),
+ PROC(LINK, linkargs, stat, 0),
+ PROC(SYMLINK, symlinkargs, stat, 0),
+ PROC(MKDIR, createargs, diropres, 0),
+ PROC(RMDIR, diropargs, stat, 0),
+ PROC(READDIR, readdirargs, readdirres, 3),
+ PROC(STATFS, fhandle, statfsres, 0),
};
struct rpc_version nfs_version2 = {
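
nfs_stat_to_errno() above walks a sentinel-terminated table shared by NFSv2 and NFSv3, falling back to -EIO for unrecognized status codes. A compilable sketch of that lookup pattern; the table is abbreviated and the entries shown are assumptions drawn from the surrounding code:

#include <stdio.h>
#include <errno.h>

static const struct {
	int stat;
	int errno_val;
} errtbl[] = {
	{  0, 0       },	/* NFS_OK */
	{  2, -ENOENT },	/* NFSERR_NOENT */
	{ 70, -ESTALE },	/* NFSERR_STALE */
	{ -1, -EIO    },	/* sentinel: unknown status maps to -EIO */
};

static int stat_to_errno(int status)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == status)
			return errtbl[i].errno_val;
	return errtbl[i].errno_val;	/* i now indexes the sentinel */
}

int main(void)
{
	printf("stat 2 -> %d, stat 9999 -> %d\n",
	       stat_to_errno(2), stat_to_errno(9999));
	return 0;
}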
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index f6cc60f06da..01c5e8b1941 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -37,18 +37,16 @@
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
#define NFS3_fattr_sz (21)
-#define NFS3_wcc_attr_sz (6)
+#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2)
+#define NFS3_wcc_attr_sz (6)
#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz)
#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz)
-#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
-#define NFS3_fsstat_sz
-#define NFS3_fsinfo_sz
-#define NFS3_pathconf_sz
-#define NFS3_entry_sz (NFS3_filename_sz+3)
-
-#define NFS3_sattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
-#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
+
+#define NFS3_getattrargs_sz (NFS3_fh_sz)
+#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_accessargs_sz (NFS3_fh_sz+1)
#define NFS3_readlinkargs_sz (NFS3_fh_sz)
#define NFS3_readargs_sz (NFS3_fh_sz+3)
@@ -57,14 +55,16 @@
#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz)
#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz)
#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz)
-#define NFS3_readdirargs_sz (NFS3_fh_sz+2)
+#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3)
+#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4)
#define NFS3_commitargs_sz (NFS3_fh_sz+3)
-#define NFS3_attrstat_sz (1+NFS3_fattr_sz)
-#define NFS3_wccstat_sz (1+NFS3_wcc_data_sz)
-#define NFS3_removeres_sz (NFS3_wccstat_sz)
+#define NFS3_getattrres_sz (1+NFS3_fattr_sz)
+#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz (NFS3_setattrres_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
@@ -100,1079 +100,2362 @@ static const umode_t nfs_type2fmt[] = {
[NF3FIFO] = S_IFIFO,
};
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int bufsize)
+{
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
+ unsigned int replen;
+
+ replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
- dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
+
/*
- * Common NFS XDR functions as inlines
+ * Encode/decode NFSv3 basic data types
+ *
+ * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
*/
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh)
+
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
{
- return xdr_encode_array(p, fh->data, fh->size);
+ __be32 *p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(value);
}
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
{
- if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) {
- memcpy(fh->data, p, fh->size);
- return p + XDR_QUADLEN(fh->size);
- }
- return NULL;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *value = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_uint64(struct xdr_stream *xdr, u64 *value)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ xdr_decode_hyper(p, value);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * fileid3
+ *
+ * typedef uint64 fileid3;
+ */
+static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
+{
+ return xdr_decode_hyper(p, fileid);
+}
+
+static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
+{
+ return decode_uint64(xdr, fileid);
+}
+
+/*
+ * filename3
+ *
+ * typedef string filename3<>;
+ */
+static void encode_filename3(struct xdr_stream *xdr,
+ const char *name, u32 length)
+{
+ __be32 *p;
+
+ BUG_ON(length > NFS3_MAXNAMLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
}
-static inline __be32 *
-xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh)
+static int decode_inline_filename3(struct xdr_stream *xdr,
+ const char **name, u32 *length)
{
__be32 *p;
+ u32 count;
+
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (count > NFS3_MAXNAMLEN)
+ goto out_nametoolong;
+ p = xdr_inline_decode(xdr, count);
+ if (unlikely(p == NULL))
goto out_overflow;
- fh->size = ntohl(*p++);
+ *name = (const char *)p;
+ *length = count;
+ return 0;
- if (fh->size <= NFS3_FHSIZE) {
- p = xdr_inline_decode(xdr, fh->size);
- if (unlikely(!p))
- goto out_overflow;
- memcpy(fh->data, p, fh->size);
- return p + XDR_QUADLEN(fh->size);
- }
- return NULL;
+out_nametoolong:
+ dprintk("NFS: returned filename too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * nfspath3
+ *
+ * typedef string nfspath3<>;
+ */
+static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
+ const u32 length)
+{
+ BUG_ON(length > NFS3_MAXPATHLEN);
+ encode_uint32(xdr, length);
+ xdr_write_pages(xdr, pages, 0, length);
+}
+static int decode_nfspath3(struct xdr_stream *xdr)
+{
+ u32 recvd, count;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
+ goto out_nametoolong;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+
+ xdr_read_pages(xdr, count);
+ xdr_terminate_string(xdr->buf, count);
+ return 0;
+
+out_nametoolong:
+ dprintk("NFS: returned pathname too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_cheating:
+ dprintk("NFS: server cheating in pathname result: "
+ "count %u > recvd %u\n", count, recvd);
+ return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EIO);
+ return -EIO;
}
/*
- * Encode/decode time.
+ * cookie3
+ *
+ * typedef uint64 cookie3
*/
-static inline __be32 *
-xdr_encode_time3(__be32 *p, struct timespec *timep)
+static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
{
- *p++ = htonl(timep->tv_sec);
- *p++ = htonl(timep->tv_nsec);
- return p;
+ return xdr_encode_hyper(p, cookie);
}
-static inline __be32 *
-xdr_decode_time3(__be32 *p, struct timespec *timep)
+static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
{
- timep->tv_sec = ntohl(*p++);
- timep->tv_nsec = ntohl(*p++);
- return p;
+ return decode_uint64(xdr, cookie);
+}
+
+/*
+ * cookieverf3
+ *
+ * typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
+ */
+static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
+{
+ memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
+ return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
+}
+
+static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * createverf3
+ *
+ * typedef opaque createverf3[NFS3_CREATEVERFSIZE];
+ */
+static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
+ memcpy(p, verifier, NFS3_CREATEVERFSIZE);
+}
+
+static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * size3
+ *
+ * typedef uint64 size3;
+ */
+static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
+{
+ return xdr_decode_hyper(p, size);
+}
+
+/*
+ * nfsstat3
+ *
+ * enum nfsstat3 {
+ * NFS3_OK = 0,
+ * ...
+ * }
+ */
+#define NFS3_OK NFS_OK
+
+static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *status = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * ftype3
+ *
+ * enum ftype3 {
+ * NF3REG = 1,
+ * NF3DIR = 2,
+ * NF3BLK = 3,
+ * NF3CHR = 4,
+ * NF3LNK = 5,
+ * NF3SOCK = 6,
+ * NF3FIFO = 7
+ * };
+ */
+static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
+{
+ BUG_ON(type > NF3FIFO);
+ encode_uint32(xdr, type);
}
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
+static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
{
- unsigned int type, major, minor;
- umode_t fmode;
+ u32 type;
- type = ntohl(*p++);
+ type = be32_to_cpup(p++);
if (type > NF3FIFO)
type = NF3NON;
- fmode = nfs_type2fmt[type];
- fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode;
- fattr->nlink = ntohl(*p++);
- fattr->uid = ntohl(*p++);
- fattr->gid = ntohl(*p++);
- p = xdr_decode_hyper(p, &fattr->size);
- p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
-
- /* Turn remote device info into Linux-specific dev_t */
- major = ntohl(*p++);
- minor = ntohl(*p++);
- fattr->rdev = MKDEV(major, minor);
- if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
- fattr->rdev = 0;
+ *mode = nfs_type2fmt[type];
+ return p;
+}
- p = xdr_decode_hyper(p, &fattr->fsid.major);
- fattr->fsid.minor = 0;
- p = xdr_decode_hyper(p, &fattr->fileid);
- p = xdr_decode_time3(p, &fattr->atime);
- p = xdr_decode_time3(p, &fattr->mtime);
- p = xdr_decode_time3(p, &fattr->ctime);
+/*
+ * specdata3
+ *
+ * struct specdata3 {
+ * uint32 specdata1;
+ * uint32 specdata2;
+ * };
+ */
+static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
+{
+ __be32 *p;
- /* Update the mode bits */
- fattr->valid |= NFS_ATTR_FATTR_V3;
+ p = xdr_reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(MAJOR(rdev));
+ *p = cpu_to_be32(MINOR(rdev));
+}
+
+static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
+{
+ unsigned int major, minor;
+
+ major = be32_to_cpup(p++);
+ minor = be32_to_cpup(p++);
+ *rdev = MKDEV(major, minor);
+ if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
+ *rdev = 0;
+ return p;
+}
+
+/*
+ * nfs_fh3
+ *
+ * struct nfs_fh3 {
+ * opaque data<NFS3_FHSIZE>;
+ * };
+ */
+static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ BUG_ON(fh->size > NFS3_FHSIZE);
+ p = xdr_reserve_space(xdr, 4 + fh->size);
+ xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ if (unlikely(length > NFS3_FHSIZE))
+ goto out_toobig;
+ p = xdr_inline_decode(xdr, length);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ fh->size = length;
+ memcpy(fh->data, p, length);
+ return 0;
+out_toobig:
+ dprintk("NFS: file handle size (%u) too big\n", length);
+ return -E2BIG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
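One non-obvious detail in decode_nfs_fh3(): the oversized-handle case returns -E2BIG rather than -EIO, and the distinction is load-bearing:

	/*
	 * Note: nfs3_decode_dirent() below relies on the distinct -E2BIG
	 * return to recognize a READDIRPLUS entry whose file handle is
	 * invalid or truncated; it then restores the previous entry and
	 * returns -EAGAIN instead of failing the whole page of entries.
	 */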
+
+static void zero_nfs_fh3(struct nfs_fh *fh)
+{
+ memset(fh, 0, sizeof(*fh));
+}
+
+/*
+ * nfstime3
+ *
+ * struct nfstime3 {
+ * uint32 seconds;
+ * uint32 nseconds;
+ * };
+ */
+static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
+{
+ *p++ = cpu_to_be32(timep->tv_sec);
+ *p++ = cpu_to_be32(timep->tv_nsec);
return p;
}
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
{
+ timep->tv_sec = be32_to_cpup(p++);
+ timep->tv_nsec = be32_to_cpup(p++);
+ return p;
+}
+
+/*
+ * sattr3
+ *
+ * enum time_how {
+ * DONT_CHANGE = 0,
+ * SET_TO_SERVER_TIME = 1,
+ * SET_TO_CLIENT_TIME = 2
+ * };
+ *
+ * union set_mode3 switch (bool set_it) {
+ * case TRUE:
+ * mode3 mode;
+ * default:
+ * void;
+ * };
+ *
+ * union set_uid3 switch (bool set_it) {
+ * case TRUE:
+ * uid3 uid;
+ * default:
+ * void;
+ * };
+ *
+ * union set_gid3 switch (bool set_it) {
+ * case TRUE:
+ * gid3 gid;
+ * default:
+ * void;
+ * };
+ *
+ * union set_size3 switch (bool set_it) {
+ * case TRUE:
+ * size3 size;
+ * default:
+ * void;
+ * };
+ *
+ * union set_atime switch (time_how set_it) {
+ * case SET_TO_CLIENT_TIME:
+ * nfstime3 atime;
+ * default:
+ * void;
+ * };
+ *
+ * union set_mtime switch (time_how set_it) {
+ * case SET_TO_CLIENT_TIME:
+ * nfstime3 mtime;
+ * default:
+ * void;
+ * };
+ *
+ * struct sattr3 {
+ * set_mode3 mode;
+ * set_uid3 uid;
+ * set_gid3 gid;
+ * set_size3 size;
+ * set_atime atime;
+ * set_mtime mtime;
+ * };
+ */
+static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+{
+ u32 nbytes;
+ __be32 *p;
+
+ /*
+ * In order to make only a single xdr_reserve_space() call,
+ * pre-compute the total number of bytes to be reserved.
+ * Six boolean values, one for each set_foo field, are always
+ * present in the encoded result, so start there.
+ */
+ nbytes = 6 * 4;
+ if (attr->ia_valid & ATTR_MODE)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_UID)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_GID)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_SIZE)
+ nbytes += 8;
+ if (attr->ia_valid & ATTR_ATIME_SET)
+ nbytes += 8;
+ if (attr->ia_valid & ATTR_MTIME_SET)
+ nbytes += 8;
+ p = xdr_reserve_space(xdr, nbytes);
+
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_mode & S_IALLUGO);
- } else {
+ *p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
+ } else
*p++ = xdr_zero;
- }
+
if (attr->ia_valid & ATTR_UID) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_uid);
- } else {
+ *p++ = cpu_to_be32(attr->ia_uid);
+ } else
*p++ = xdr_zero;
- }
+
if (attr->ia_valid & ATTR_GID) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_gid);
- } else {
+ *p++ = cpu_to_be32(attr->ia_gid);
+ } else
*p++ = xdr_zero;
- }
+
if (attr->ia_valid & ATTR_SIZE) {
*p++ = xdr_one;
- p = xdr_encode_hyper(p, (__u64) attr->ia_size);
- } else {
+ p = xdr_encode_hyper(p, (u64)attr->ia_size);
+ } else
*p++ = xdr_zero;
- }
+
if (attr->ia_valid & ATTR_ATIME_SET) {
*p++ = xdr_two;
- p = xdr_encode_time3(p, &attr->ia_atime);
+ p = xdr_encode_nfstime3(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
*p++ = xdr_one;
- } else {
+ } else
*p++ = xdr_zero;
- }
+
if (attr->ia_valid & ATTR_MTIME_SET) {
*p++ = xdr_two;
- p = xdr_encode_time3(p, &attr->ia_mtime);
+ xdr_encode_nfstime3(p, &attr->ia_mtime);
} else if (attr->ia_valid & ATTR_MTIME) {
- *p++ = xdr_one;
- } else {
- *p++ = xdr_zero;
- }
- return p;
+ *p = xdr_one;
+ } else
+ *p = xdr_zero;
+}
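The reservation arithmetic in encode_sattr3() is easiest to see with concrete numbers. A worked example (illustrative values only):

	/*
	 * Worked example: a setattr carrying ATTR_MODE and ATTR_SIZE
	 * reserves
	 *
	 *	6 * 4 = 24 bytes	six discriminant booleans
	 *	        4 bytes		mode3 value
	 *	        8 bytes		size3 value
	 *	       36 bytes total
	 *
	 * The atime and mtime arms then emit only their xdr_zero
	 * discriminants, which are already counted in the 6 * 4.
	 */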
+
+/*
+ * fattr3
+ *
+ * struct fattr3 {
+ * ftype3 type;
+ * mode3 mode;
+ * uint32 nlink;
+ * uid3 uid;
+ * gid3 gid;
+ * size3 size;
+ * size3 used;
+ * specdata3 rdev;
+ * uint64 fsid;
+ * fileid3 fileid;
+ * nfstime3 atime;
+ * nfstime3 mtime;
+ * nfstime3 ctime;
+ * };
+ */
+static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ umode_t fmode;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ p = xdr_decode_ftype3(p, &fmode);
+
+ fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
+ fattr->nlink = be32_to_cpup(p++);
+ fattr->uid = be32_to_cpup(p++);
+ fattr->gid = be32_to_cpup(p++);
+
+ p = xdr_decode_size3(p, &fattr->size);
+ p = xdr_decode_size3(p, &fattr->du.nfs3.used);
+ p = xdr_decode_specdata3(p, &fattr->rdev);
+
+ p = xdr_decode_hyper(p, &fattr->fsid.major);
+ fattr->fsid.minor = 0;
+
+ p = xdr_decode_fileid3(p, &fattr->fileid);
+ p = xdr_decode_nfstime3(p, &fattr->atime);
+ p = xdr_decode_nfstime3(p, &fattr->mtime);
+ xdr_decode_nfstime3(p, &fattr->ctime);
+
+ fattr->valid |= NFS_ATTR_FATTR_V3;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static inline __be32 *
-xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * post_op_attr
+ *
+ * union post_op_attr switch (bool attributes_follow) {
+ * case TRUE:
+ * fattr3 attributes;
+ * case FALSE:
+ * void;
+ * };
+ */
+static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
- p = xdr_decode_hyper(p, &fattr->pre_size);
- p = xdr_decode_time3(p, &fattr->pre_mtime);
- p = xdr_decode_time3(p, &fattr->pre_ctime);
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero)
+ return decode_fattr3(xdr, fattr);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * wcc_attr
+ * struct wcc_attr {
+ * size3 size;
+ * nfstime3 mtime;
+ * nfstime3 ctime;
+ * };
+ */
+static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME;
- return p;
-}
-static inline __be32 *
-xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr)
-{
- if (*p++)
- p = xdr_decode_fattr(p, fattr);
- return p;
+ p = xdr_decode_size3(p, &fattr->pre_size);
+ p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
+ xdr_decode_nfstime3(p, &fattr->pre_ctime);
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static inline __be32 *
-xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+/*
+ * pre_op_attr
+ * union pre_op_attr switch (bool attributes_follow) {
+ * case TRUE:
+ * wcc_attr attributes;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * wcc_data
+ *
+ * struct wcc_data {
+ * pre_op_attr before;
+ * post_op_attr after;
+ * };
+ */
+static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
+ if (unlikely(p == NULL))
goto out_overflow;
- if (ntohl(*p++)) {
- p = xdr_inline_decode(xdr, 84);
- if (unlikely(!p))
- goto out_overflow;
- p = xdr_decode_fattr(p, fattr);
- }
- return p;
+ if (*p != xdr_zero)
+ return decode_wcc_attr(xdr, fattr);
+ return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EIO);
+ return -EIO;
}
-static inline __be32 *
-xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
+static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
- if (*p++)
- return xdr_decode_wcc_attr(p, fattr);
- return p;
+ int error;
+
+ error = decode_pre_op_attr(xdr, fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, fattr);
+out:
+ return error;
}
+/*
+ * post_op_fh3
+ *
+ * union post_op_fh3 switch (bool handle_follows) {
+ * case TRUE:
+ * nfs_fh3 handle;
+ * case FALSE:
+ * void;
+ * };
+ */
+static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero)
+ return decode_nfs_fh3(xdr, fh);
+ zero_nfs_fh3(fh);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
-static inline __be32 *
-xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * diropargs3
+ *
+ * struct diropargs3 {
+ * nfs_fh3 dir;
+ * filename3 name;
+ * };
+ */
+static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+ const char *name, u32 length)
{
- p = xdr_decode_pre_op_attr(p, fattr);
- return xdr_decode_post_op_attr(p, fattr);
+ encode_nfs_fh3(xdr, fh);
+ encode_filename3(xdr, name, length);
}
+
/*
- * NFS encode functions
+ * NFSv3 XDR encode functions
+ *
+ * NFSv3 argument types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
*/
/*
- * Encode file handle argument
+ * 3.3.1 GETATTR3args
+ *
+ * struct GETATTR3args {
+ * nfs_fh3 object;
+ * };
*/
-static int
-nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
+static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_fh *fh)
{
- p = xdr_encode_fhandle(p, fh);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_nfs_fh3(xdr, fh);
}
/*
- * Encode SETATTR arguments
+ * 3.3.2 SETATTR3args
+ *
+ * union sattrguard3 switch (bool check) {
+ * case TRUE:
+ * nfstime3 obj_ctime;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct SETATTR3args {
+ * nfs_fh3 object;
+ * sattr3 new_attributes;
+ * sattrguard3 guard;
+ * };
*/
-static int
-nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
-{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_sattr(p, args->sattr);
- *p++ = htonl(args->guard);
- if (args->guard)
- p = xdr_encode_time3(p, &args->guardtime);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+static void encode_sattrguard3(struct xdr_stream *xdr,
+ const struct nfs3_sattrargs *args)
+{
+ __be32 *p;
+
+ if (args->guard) {
+ p = xdr_reserve_space(xdr, 4 + 8);
+ *p++ = xdr_one;
+ xdr_encode_nfstime3(p, &args->guardtime);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ *p = xdr_zero;
+ }
+}
+
+static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_sattrargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ encode_sattr3(xdr, args->sattr);
+ encode_sattrguard3(xdr, args);
}
/*
- * Encode directory ops argument
+ * 3.3.3 LOOKUP3args
+ *
+ * struct LOOKUP3args {
+ * diropargs3 what;
+ * };
*/
-static int
-nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args)
+static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_diropargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
}
/*
- * Encode REMOVE argument
+ * 3.3.4 ACCESS3args
+ *
+ * struct ACCESS3args {
+ * nfs_fh3 object;
+ * uint32 access;
+ * };
*/
-static int
-nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static void encode_access3args(struct xdr_stream *xdr,
+ const struct nfs3_accessargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name.name, args->name.len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_nfs_fh3(xdr, args->fh);
+ encode_uint32(xdr, args->access);
+}
+
+static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_accessargs *args)
+{
+ encode_access3args(xdr, args);
}
/*
- * Encode access() argument
+ * 3.3.5 READLINK3args
+ *
+ * struct READLINK3args {
+ * nfs_fh3 symlink;
+ * };
*/
-static int
-nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args)
+static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readlinkargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(args->access);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_nfs_fh3(xdr, args->fh);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->pglen, NFS3_readlinkres_sz);
}
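prepare_reply_buffer() is introduced by this patch outside the excerpt shown here. Based on the pre-patch inline code it replaces (visible in the removed lines of the READ and READDIR encoders below), it plausibly looks like this; treat it as a reconstruction, not the authoritative definition:

	static void prepare_reply_buffer(struct rpc_rqst *req,
					 struct page **pages,
					 unsigned int base, unsigned int len,
					 unsigned int bufsize)
	{
		struct rpc_auth	*auth = req->rq_cred->cr_auth;
		unsigned int replen;

		/* RPC reply header + credential slack + fixed reply size,
		 * in XDR words; shifted to bytes for xdr_inline_pages(). */
		replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
		xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
				 pages, base, len);
	}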
/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
+ * 3.3.6 READ3args
+ *
+ * struct READ3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * };
*/
-static int
-nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void encode_read3args(struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
- u32 count = args->count;
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
- p = xdr_encode_fhandle(p, args->fh);
+ p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
- *p++ = htonl(count);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ *p = cpu_to_be32(args->count);
+}
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
- args->pages, args->pgbase, count);
+static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ encode_read3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->count, NFS3_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
- return 0;
}
/*
- * Write arguments. Splice the buffer to be written into the iovec.
+ * 3.3.7 WRITE3args
+ *
+ * enum stable_how {
+ * UNSTABLE = 0,
+ * DATA_SYNC = 1,
+ * FILE_SYNC = 2
+ * };
+ *
+ * struct WRITE3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * stable_how stable;
+ * opaque data<>;
+ * };
*/
-static int
-nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void encode_write3args(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
{
- struct xdr_buf *sndbuf = &req->rq_snd_buf;
- u32 count = args->count;
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
- p = xdr_encode_fhandle(p, args->fh);
+ p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
p = xdr_encode_hyper(p, args->offset);
- *p++ = htonl(count);
- *p++ = htonl(args->stable);
- *p++ = htonl(count);
- sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
- /* Copy the page array */
- xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
- sndbuf->flags |= XDRBUF_WRITE;
- return 0;
+ *p++ = cpu_to_be32(args->count);
+ *p++ = cpu_to_be32(args->stable);
+ *p = cpu_to_be32(args->count);
+ xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
+
+static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ encode_write3args(xdr, args);
+ xdr->buf->flags |= XDRBUF_WRITE;
}
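For reference, the fixed part of WRITE3args as encoded above lays out on the wire like this (sizes in XDR bytes; illustrative summary, not new protocol):

	/*
	 * nfs_fh3 file		4-byte length + fh->size, padded to 4
	 * offset3 offset	8
	 * count3 count		4
	 * stable_how stable	4
	 * opaque data<>	4-byte length (== count again) + payload
	 *
	 * The byte count deliberately appears twice: once as the count3
	 * field and once as the length of the opaque array, which is why
	 * encode_write3args() writes args->count two times.
	 */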
/*
- * Encode CREATE arguments
+ * 3.3.8 CREATE3args
+ *
+ * enum createmode3 {
+ * UNCHECKED = 0,
+ * GUARDED = 1,
+ * EXCLUSIVE = 2
+ * };
+ *
+ * union createhow3 switch (createmode3 mode) {
+ * case UNCHECKED:
+ * case GUARDED:
+ * sattr3 obj_attributes;
+ * case EXCLUSIVE:
+ * createverf3 verf;
+ * };
+ *
+ * struct CREATE3args {
+ * diropargs3 where;
+ * createhow3 how;
+ * };
*/
-static int
-nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args)
+static void encode_createhow3(struct xdr_stream *xdr,
+ const struct nfs3_createargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
-
- *p++ = htonl(args->createmode);
- if (args->createmode == NFS3_CREATE_EXCLUSIVE) {
- *p++ = args->verifier[0];
- *p++ = args->verifier[1];
- } else
- p = xdr_encode_sattr(p, args->sattr);
+ encode_uint32(xdr, args->createmode);
+ switch (args->createmode) {
+ case NFS3_CREATE_UNCHECKED:
+ case NFS3_CREATE_GUARDED:
+ encode_sattr3(xdr, args->sattr);
+ break;
+ case NFS3_CREATE_EXCLUSIVE:
+ encode_createverf3(xdr, args->verifier);
+ break;
+ default:
+ BUG();
+ }
+}
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_createargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_createhow3(xdr, args);
}
/*
- * Encode MKDIR arguments
+ * 3.3.9 MKDIR3args
+ *
+ * struct MKDIR3args {
+ * diropargs3 where;
+ * sattr3 attributes;
+ * };
*/
-static int
-nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args)
+static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_mkdirargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
- p = xdr_encode_sattr(p, args->sattr);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_sattr3(xdr, args->sattr);
}
/*
- * Encode SYMLINK arguments
+ * 3.3.10 SYMLINK3args
+ *
+ * struct symlinkdata3 {
+ * sattr3 symlink_attributes;
+ * nfspath3 symlink_data;
+ * };
+ *
+ * struct SYMLINK3args {
+ * diropargs3 where;
+ * symlinkdata3 symlink;
+ * };
*/
-static int
-nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args)
+static void encode_symlinkdata3(struct xdr_stream *xdr,
+ const struct nfs3_symlinkargs *args)
{
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_sattr(p, args->sattr);
- *p++ = htonl(args->pathlen);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ encode_sattr3(xdr, args->sattr);
+ encode_nfspath3(xdr, args->pages, args->pathlen);
+}
- /* Copy the page */
- xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen);
- return 0;
+static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_symlinkargs *args)
+{
+ encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+ encode_symlinkdata3(xdr, args);
}
/*
- * Encode MKNOD arguments
+ * 3.3.11 MKNOD3args
+ *
+ * struct devicedata3 {
+ * sattr3 dev_attributes;
+ * specdata3 spec;
+ * };
+ *
+ * union mknoddata3 switch (ftype3 type) {
+ * case NF3CHR:
+ * case NF3BLK:
+ * devicedata3 device;
+ * case NF3SOCK:
+ * case NF3FIFO:
+ * sattr3 pipe_attributes;
+ * default:
+ * void;
+ * };
+ *
+ * struct MKNOD3args {
+ * diropargs3 where;
+ * mknoddata3 what;
+ * };
*/
-static int
-nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args)
-{
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_array(p, args->name, args->len);
- *p++ = htonl(args->type);
- p = xdr_encode_sattr(p, args->sattr);
- if (args->type == NF3CHR || args->type == NF3BLK) {
- *p++ = htonl(MAJOR(args->rdev));
- *p++ = htonl(MINOR(args->rdev));
+static void encode_devicedata3(struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_sattr3(xdr, args->sattr);
+ encode_specdata3(xdr, args->rdev);
+}
+
+static void encode_mknoddata3(struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_ftype3(xdr, args->type);
+ switch (args->type) {
+ case NF3CHR:
+ case NF3BLK:
+ encode_devicedata3(xdr, args);
+ break;
+ case NF3SOCK:
+ case NF3FIFO:
+ encode_sattr3(xdr, args->sattr);
+ break;
+ case NF3REG:
+ case NF3DIR:
+ break;
+ default:
+ BUG();
}
+}
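A concrete case may help (hypothetical values): creating a character device like /dev/null, which is major 1, minor 3, flows through the NF3CHR arm:

	/*
	 * Example: mknod of a 1:3 character device encodes as
	 *
	 *	ftype3		NF3CHR
	 *	sattr3		whatever args->sattr selects
	 *	specdata3	major = 1, minor = 3
	 *
	 * NF3REG and NF3DIR carry no body here because regular files and
	 * directories are created with CREATE and MKDIR, never MKNOD.
	 */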
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_mknoddata3(xdr, args);
}
/*
- * Encode RENAME arguments
+ * 3.3.12 REMOVE3args
+ *
+ * struct REMOVE3args {
+ * diropargs3 object;
+ * };
*/
-static int
-nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
- p = xdr_encode_fhandle(p, args->old_dir);
- p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
- p = xdr_encode_fhandle(p, args->new_dir);
- p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
}
/*
- * Encode LINK arguments
+ * 3.3.14 RENAME3args
+ *
+ * struct RENAME3args {
+ * diropargs3 from;
+ * diropargs3 to;
+ * };
*/
-static int
-nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
+static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
{
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_fhandle(p, args->tofh);
- p = xdr_encode_array(p, args->toname, args->tolen);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ const struct qstr *old = args->old_name;
+ const struct qstr *new = args->new_name;
+
+ encode_diropargs3(xdr, args->old_dir, old->name, old->len);
+ encode_diropargs3(xdr, args->new_dir, new->name, new->len);
}
/*
- * Encode arguments to readdir call
+ * 3.3.15 LINK3args
+ *
+ * struct LINK3args {
+ * nfs_fh3 file;
+ * diropargs3 link;
+ * };
*/
-static int
-nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
+static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_linkargs *args)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
- u32 count = args->count;
-
- p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_hyper(p, args->cookie);
- *p++ = args->verf[0];
- *p++ = args->verf[1];
- if (args->plus) {
- /* readdirplus: need dircount + buffer size.
- * We just make sure we make dircount big enough */
- *p++ = htonl(count >> 3);
- }
- *p++ = htonl(count);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
- return 0;
+ encode_nfs_fh3(xdr, args->fromfh);
+ encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
}
/*
- * Decode the result of a readdir call.
- * We just check for syntactical correctness.
+ * 3.3.16 READDIR3args
+ *
+ * struct READDIR3args {
+ * nfs_fh3 dir;
+ * cookie3 cookie;
+ * cookieverf3 cookieverf;
+ * count3 count;
+ * };
*/
-static int
-nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res)
+static void encode_readdir3args(struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
{
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- struct page **page;
- size_t hdrlen;
- u32 recvd, pglen;
- int status;
-
- status = ntohl(*p++);
- /* Decode post_op_attrs */
- p = xdr_decode_post_op_attr(p, res->dir_attr);
- if (status)
- return nfs_stat_to_errno(status);
- /* Decode verifier cookie */
- if (res->verf) {
- res->verf[0] = *p++;
- res->verf[1] = *p++;
- } else {
- p += 2;
- }
+ __be32 *p;
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READDIR reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
- xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
- }
+ encode_nfs_fh3(xdr, args->fh);
- pglen = rcvbuf->page_len;
- recvd = rcvbuf->len - hdrlen;
- if (pglen > recvd)
- pglen = recvd;
- page = rcvbuf->pages;
+ p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
+ p = xdr_encode_cookie3(p, args->cookie);
+ p = xdr_encode_cookieverf3(p, args->verf);
+ *p = cpu_to_be32(args->count);
+}
- return pglen;
+static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ encode_readdir3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS3_readdirres_sz);
}
-__be32 *
-nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+/*
+ * 3.3.17 READDIRPLUS3args
+ *
+ * struct READDIRPLUS3args {
+ * nfs_fh3 dir;
+ * cookie3 cookie;
+ * cookieverf3 cookieverf;
+ * count3 dircount;
+ * count3 maxcount;
+ * };
+ */
+static void encode_readdirplus3args(struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
{
__be32 *p;
- struct nfs_entry old = *entry;
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- if (!ntohl(*p++)) {
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- if (!ntohl(*p++))
- return ERR_PTR(-EAGAIN);
- entry->eof = 1;
- return ERR_PTR(-EBADCOOKIE);
- }
- p = xdr_inline_decode(xdr, 12);
- if (unlikely(!p))
- goto out_overflow;
- p = xdr_decode_hyper(p, &entry->ino);
- entry->len = ntohl(*p++);
+ encode_nfs_fh3(xdr, args->fh);
- p = xdr_inline_decode(xdr, entry->len + 8);
- if (unlikely(!p))
- goto out_overflow;
- entry->name = (const char *) p;
- p += XDR_QUADLEN(entry->len);
- entry->prev_cookie = entry->cookie;
- p = xdr_decode_hyper(p, &entry->cookie);
-
- entry->d_type = DT_UNKNOWN;
- if (plus) {
- entry->fattr->valid = 0;
- p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
- if (IS_ERR(p))
- goto out_overflow_exit;
- entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
- /* In fact, a post_op_fh3: */
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- if (*p++) {
- p = xdr_decode_fhandle_stream(xdr, entry->fh);
- if (IS_ERR(p))
- goto out_overflow_exit;
- /* Ugh -- server reply was truncated */
- if (p == NULL) {
- dprintk("NFS: FH truncated\n");
- *entry = old;
- return ERR_PTR(-EAGAIN);
- }
- } else
- memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
- }
+ p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
+ p = xdr_encode_cookie3(p, args->cookie);
+ p = xdr_encode_cookieverf3(p, args->verf);
- p = xdr_inline_peek(xdr, 8);
- if (p != NULL)
- entry->eof = !p[0] && p[1];
- else
- entry->eof = 0;
+ /*
+ * readdirplus: need dircount + buffer size.
+ * We just make sure dircount is large enough.
+ */
+ *p++ = cpu_to_be32(args->count >> 3);
- return p;
+ *p = cpu_to_be32(args->count);
+}
-out_overflow:
- print_overflow_msg(__func__, xdr);
-out_overflow_exit:
- return ERR_PTR(-EAGAIN);
+static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ encode_readdirplus3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS3_readdirres_sz);
}
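The dircount heuristic above is simpler than it looks. Illustrative arithmetic, using a typical value:

	/*
	 * With args->count = 32768, the encoder advertises
	 *
	 *	dircount = 32768 >> 3 = 4096 bytes
	 *	maxcount = 32768 bytes
	 *
	 * i.e. it assumes the attributes and file handles in a
	 * READDIRPLUS reply outweigh the bare directory entries by
	 * roughly a factor of eight.
	 */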
/*
- * Encode COMMIT arguments
+ * 3.3.21 COMMIT3args
+ *
+ * struct COMMIT3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * };
*/
-static int
-nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void encode_commit3args(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
{
- p = xdr_encode_fhandle(p, args->fh);
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + 4);
p = xdr_encode_hyper(p, args->offset);
- *p++ = htonl(args->count);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- return 0;
+ *p = cpu_to_be32(args->count);
}
-#ifdef CONFIG_NFS_V3_ACL
-/*
- * Encode GETACL arguments
- */
-static int
-nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
- struct nfs3_getaclargs *args)
+static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
+ encode_commit3args(xdr, args);
+}
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(args->mask);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+#ifdef CONFIG_NFS_V3_ACL
- if (args->mask & (NFS_ACL | NFS_DFACL)) {
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack +
- ACL3_getaclres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0,
- NFSACL_MAXPAGES << PAGE_SHIFT);
- }
- return 0;
+static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_getaclargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ encode_uint32(xdr, args->mask);
+ if (args->mask & (NFS_ACL | NFS_DFACL))
+ prepare_reply_buffer(req, args->pages, 0,
+ NFSACL_MAXPAGES << PAGE_SHIFT,
+ ACL3_getaclres_sz);
}
-/*
- * Encode SETACL arguments
- */
-static int
-nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
- struct nfs3_setaclargs *args)
+static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_setaclargs *args)
{
- struct xdr_buf *buf = &req->rq_snd_buf;
unsigned int base;
- int err;
-
- p = xdr_encode_fhandle(p, NFS_FH(args->inode));
- *p++ = htonl(args->mask);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
- base = req->rq_slen;
+ int error;
+ encode_nfs_fh3(xdr, NFS_FH(args->inode));
+ encode_uint32(xdr, args->mask);
if (args->npages != 0)
- xdr_encode_pages(buf, args->pages, 0, args->len);
- else
- req->rq_slen = xdr_adjust_iovec(req->rq_svec,
- p + XDR_QUADLEN(args->len));
+ xdr_write_pages(xdr, args->pages, 0, args->len);
- err = nfsacl_encode(buf, base, args->inode,
+ base = req->rq_slen;
+ error = nfsacl_encode(xdr->buf, base, args->inode,
(args->mask & NFS_ACL) ?
args->acl_access : NULL, 1, 0);
- if (err > 0)
- err = nfsacl_encode(buf, base + err, args->inode,
- (args->mask & NFS_DFACL) ?
- args->acl_default : NULL, 1,
- NFS_ACL_DEFAULT);
- return (err > 0) ? 0 : err;
+ BUG_ON(error < 0);
+ error = nfsacl_encode(xdr->buf, base + error, args->inode,
+ (args->mask & NFS_DFACL) ?
+ args->acl_default : NULL, 1,
+ NFS_ACL_DEFAULT);
+ BUG_ON(error < 0);
}
+
#endif /* CONFIG_NFS_V3_ACL */
/*
- * NFS XDR decode functions
+ * NFSv3 XDR decode functions
+ *
+ * NFSv3 result types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
*/
/*
- * Decode attrstat reply.
+ * 3.3.1 GETATTR3res
+ *
+ * struct GETATTR3resok {
+ * fattr3 obj_attributes;
+ * };
+ *
+ * union GETATTR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * GETATTR3resok resok;
+ * default:
+ * void;
+ * };
*/
-static int
-nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
{
- int status;
-
- if ((status = ntohl(*p++)))
- return nfs_stat_to_errno(status);
- xdr_decode_fattr(p, fattr);
- return 0;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_fattr3(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
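nfs3_xdr_dec_getattr3res() is the template for every result decoder that follows; spelling the pattern out once saves re-reading it a dozen times:

	/*
	 * Pattern: decode nfsstat3 first, then any fields shared by the
	 * resok and resfail arms (post_op_attr, wcc_data), and finally
	 * either the resok body or nfs_stat_to_errno(status) on failure.
	 */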
/*
- * Decode status+wcc_data reply
- * SATTR, REMOVE, RMDIR
+ * 3.3.2 SETATTR3res
+ *
+ * struct SETATTR3resok {
+ * wcc_data obj_wcc;
+ * };
+ *
+ * struct SETATTR3resfail {
+ * wcc_data obj_wcc;
+ * };
+ *
+ * union SETATTR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * SETATTR3resok resok;
+ * default:
+ * SETATTR3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
{
- int status;
-
- if ((status = ntohl(*p++)))
- status = nfs_stat_to_errno(status);
- xdr_decode_wcc_data(p, fattr);
- return status;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
-static int
-nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
+/*
+ * 3.3.3 LOOKUP3res
+ *
+ * struct LOOKUP3resok {
+ * nfs_fh3 object;
+ * post_op_attr obj_attributes;
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * struct LOOKUP3resfail {
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * union LOOKUP3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * LOOKUP3resok resok;
+ * default:
+ * LOOKUP3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
{
- return nfs3_xdr_wccstat(req, p, res->dir_attr);
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_nfs_fh3(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->dir_attr);
+out:
+ return error;
+out_default:
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
}
/*
- * Decode LOOKUP reply
+ * 3.3.4 ACCESS3res
+ *
+ * struct ACCESS3resok {
+ * post_op_attr obj_attributes;
+ * uint32 access;
+ * };
+ *
+ * struct ACCESS3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union ACCESS3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * ACCESS3resok resok;
+ * default:
+ * ACCESS3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_accessres *result)
{
- int status;
-
- if ((status = ntohl(*p++))) {
- status = nfs_stat_to_errno(status);
- } else {
- if (!(p = xdr_decode_fhandle(p, res->fh)))
- return -errno_NFSERR_IO;
- p = xdr_decode_post_op_attr(p, res->fattr);
- }
- xdr_decode_post_op_attr(p, res->dir_attr);
- return status;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_uint32(xdr, &result->access);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode ACCESS reply
+ * 3.3.5 READLINK3res
+ *
+ * struct READLINK3resok {
+ * post_op_attr symlink_attributes;
+ * nfspath3 data;
+ * };
+ *
+ * struct READLINK3resfail {
+ * post_op_attr symlink_attributes;
+ * };
+ *
+ * union READLINK3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READLINK3resok resok;
+ * default:
+ * READLINK3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
+static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
{
- int status = ntohl(*p++);
-
- p = xdr_decode_post_op_attr(p, res->fattr);
- if (status)
- return nfs_stat_to_errno(status);
- res->access = ntohl(*p++);
- return 0;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_nfspath3(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
-static int
-nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
+/*
+ * 3.3.6 READ3res
+ *
+ * struct READ3resok {
+ * post_op_attr file_attributes;
+ * count3 count;
+ * bool eof;
+ * opaque data<>;
+ * };
+ *
+ * struct READ3resfail {
+ * post_op_attr file_attributes;
+ * };
+ *
+ * union READ3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READ3resok resok;
+ * default:
+ * READ3resfail resfail;
+ * };
+ */
+static int decode_read3resok(struct xdr_stream *xdr,
+ struct nfs_readres *result)
{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
+ u32 eof, count, ocount, recvd;
+ size_t hdrlen;
+ __be32 *p;
- p = xdr_encode_fhandle(p, args->fh);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ p = xdr_inline_decode(xdr, 4 + 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p++);
+ eof = be32_to_cpup(p++);
+ ocount = be32_to_cpup(p++);
+ if (unlikely(ocount != count))
+ goto out_mismatch;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+
+out:
+ xdr_read_pages(xdr, count);
+ result->eof = eof;
+ result->count = count;
+ return count;
+out_mismatch:
+ dprintk("NFS: READ count doesn't match length of opaque: "
+ "count %u != ocount %u\n", count, ocount);
+ return -EIO;
+out_cheating:
+ dprintk("NFS: server cheating in read result: "
+ "count %u > recvd %u\n", count, recvd);
+ count = recvd;
+ eof = 0;
+ goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
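The out_cheating clamp above is best seen with numbers (hypothetical values):

	/*
	 * Example: the server claims count = 16384, but only
	 * recvd = 8192 bytes of data actually arrived.  The result is
	 * clamped to 8192 and eof is forced to zero, so the caller sees
	 * a short read and retries the remainder rather than treating
	 * the file as truncated.
	 */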
- /* Inline the page array */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
- return 0;
+static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_read3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode READLINK reply
+ * 3.3.7 WRITE3res
+ *
+ * enum stable_how {
+ * UNSTABLE = 0,
+ * DATA_SYNC = 1,
+ * FILE_SYNC = 2
+ * };
+ *
+ * struct WRITE3resok {
+ * wcc_data file_wcc;
+ * count3 count;
+ * stable_how committed;
+ * writeverf3 verf;
+ * };
+ *
+ * struct WRITE3resfail {
+ * wcc_data file_wcc;
+ * };
+ *
+ * union WRITE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * WRITE3resok resok;
+ * default:
+ * WRITE3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int decode_write3resok(struct xdr_stream *xdr,
+ struct nfs_writeres *result)
{
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- size_t hdrlen;
- u32 len, recvd;
- int status;
-
- status = ntohl(*p++);
- p = xdr_decode_post_op_attr(p, fattr);
-
- if (status != 0)
- return nfs_stat_to_errno(status);
-
- /* Convert length of symlink */
- len = ntohl(*p++);
- if (len >= rcvbuf->page_len) {
- dprintk("nfs: server returned giant symlink!\n");
- return -ENAMETOOLONG;
- }
+ __be32 *p;
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READLINK reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READLINK header is short. "
- "iovec will be shifted.\n");
- xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
- }
- recvd = req->rq_rcv_buf.len - hdrlen;
- if (recvd < len) {
- dprintk("NFS: server cheating in readlink reply: "
- "count %u > recvd %u\n", len, recvd);
- return -EIO;
- }
+ p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->count = be32_to_cpup(p++);
+ result->verf->committed = be32_to_cpup(p++);
+ if (unlikely(result->verf->committed > NFS_FILE_SYNC))
+ goto out_badvalue;
+ memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+ return result->count;
+out_badvalue:
+ dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
- xdr_terminate_string(rcvbuf, len);
- return 0;
+static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_write3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode READ reply
+ * 3.3.8 CREATE3res
+ *
+ * struct CREATE3resok {
+ * post_op_fh3 obj;
+ * post_op_attr obj_attributes;
+ * wcc_data dir_wcc;
+ * };
+ *
+ * struct CREATE3resfail {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * union CREATE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * CREATE3resok resok;
+ * default:
+ * CREATE3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
+static int decode_create3resok(struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
{
- struct kvec *iov = req->rq_rcv_buf.head;
- size_t hdrlen;
- u32 count, ocount, recvd;
- int status;
+ int error;
+
+ error = decode_post_op_fh3(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ /* The server isn't required to return a file handle.
+ * If it didn't, force the client to perform a LOOKUP
+ * to determine the correct file handle and attribute
+ * values for the new object. */
+ if (result->fh->size == 0)
+ result->fattr->valid = 0;
+ error = decode_wcc_data(xdr, result->dir_attr);
+out:
+ return error;
+}
- status = ntohl(*p++);
- p = xdr_decode_post_op_attr(p, res->fattr);
+static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_create3resok(xdr, result);
+out:
+ return error;
+out_default:
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
+}
- if (status != 0)
- return nfs_stat_to_errno(status);
+/*
+ * 3.3.12 REMOVE3res
+ *
+ * struct REMOVE3resok {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * struct REMOVE3resfail {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * union REMOVE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * REMOVE3resok resok;
+ * default:
+ * REMOVE3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_removeres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
- /* Decode reply count and EOF flag. NFSv3 is somewhat redundant
- * in that it puts the count both in the res struct and in the
- * opaque data count. */
- count = ntohl(*p++);
- res->eof = ntohl(*p++);
- ocount = ntohl(*p++);
+/*
+ * 3.3.14 RENAME3res
+ *
+ * struct RENAME3resok {
+ * wcc_data fromdir_wcc;
+ * wcc_data todir_wcc;
+ * };
+ *
+ * struct RENAME3resfail {
+ * wcc_data fromdir_wcc;
+ * wcc_data todir_wcc;
+ * };
+ *
+ * union RENAME3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * RENAME3resok resok;
+ * default:
+ * RENAME3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_renameres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->old_fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->new_fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
- if (ocount != count) {
- dprintk("NFS: READ count doesn't match RPC opaque count.\n");
- return -errno_NFSERR_IO;
- }
+/*
+ * 3.3.15 LINK3res
+ *
+ * struct LINK3resok {
+ * post_op_attr file_attributes;
+ * wcc_data linkdir_wcc;
+ * };
+ *
+ * struct LINK3resfail {
+ * post_op_attr file_attributes;
+ * wcc_data linkdir_wcc;
+ * };
+ *
+ * union LINK3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * LINK3resok resok;
+ * default:
+ * LINK3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs3_linkres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
- if (iov->iov_len < hdrlen) {
- dprintk("NFS: READ reply header overflowed:"
- "length %Zu > %Zu\n", hdrlen, iov->iov_len);
- return -errno_NFSERR_IO;
- } else if (iov->iov_len != hdrlen) {
- dprintk("NFS: READ header is short. iovec will be shifted.\n");
- xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
- }
+/**
+ * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
+ * the local page cache
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 3.3.16 entry3
+ *
+ * struct entry3 {
+ * fileid3 fileid;
+ * filename3 name;
+ * cookie3 cookie;
+ * entry3 *nextentry;
+ * };
+ *
+ * 3.3.17 entryplus3
+ * struct entryplus3 {
+ * fileid3 fileid;
+ * filename3 name;
+ * cookie3 cookie;
+ * post_op_attr name_attributes;
+ * post_op_fh3 name_handle;
+ * entryplus3 *nextentry;
+ * };
+ */
+int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
+{
+ struct nfs_entry old = *entry;
+ __be32 *p;
+ int error;
- recvd = req->rq_rcv_buf.len - hdrlen;
- if (count > recvd) {
- dprintk("NFS: server cheating in read reply: "
- "count %u > recvd %u\n", count, recvd);
- count = recvd;
- res->eof = 0;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p == xdr_zero) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p == xdr_zero)
+ return -EAGAIN;
+ entry->eof = 1;
+ return -EBADCOOKIE;
}
- if (count < res->count)
- res->count = count;
+ error = decode_fileid3(xdr, &entry->ino);
+ if (unlikely(error))
+ return error;
- return count;
-}
+ error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+ if (unlikely(error))
+ return error;
-/*
- * Decode WRITE response
- */
-static int
-nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
-{
- int status;
+ entry->prev_cookie = entry->cookie;
+ error = decode_cookie3(xdr, &entry->cookie);
+ if (unlikely(error))
+ return error;
- status = ntohl(*p++);
- p = xdr_decode_wcc_data(p, res->fattr);
+ entry->d_type = DT_UNKNOWN;
- if (status != 0)
- return nfs_stat_to_errno(status);
+ if (plus) {
+ entry->fattr->valid = 0;
+ error = decode_post_op_attr(xdr, entry->fattr);
+ if (unlikely(error))
+ return error;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
+ entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
- res->count = ntohl(*p++);
- res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
- res->verf->verifier[0] = *p++;
- res->verf->verifier[1] = *p++;
+ /* In fact, a post_op_fh3: */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero) {
+ error = decode_nfs_fh3(xdr, entry->fh);
+ if (unlikely(error)) {
+ if (error == -E2BIG)
+ goto out_truncated;
+ return error;
+ }
+ } else
+ zero_nfs_fh3(entry->fh);
+ }
- return res->count;
-}
+ return 0;
-/*
- * Decode a CREATE response
- */
-static int
-nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
-{
- int status;
-
- status = ntohl(*p++);
- if (status == 0) {
- if (*p++) {
- if (!(p = xdr_decode_fhandle(p, res->fh)))
- return -errno_NFSERR_IO;
- p = xdr_decode_post_op_attr(p, res->fattr);
- } else {
- memset(res->fh, 0, sizeof(*res->fh));
- /* Do decode post_op_attr but set it to NULL */
- p = xdr_decode_post_op_attr(p, res->fattr);
- res->fattr->valid = 0;
- }
- } else {
- status = nfs_stat_to_errno(status);
- }
- p = xdr_decode_wcc_data(p, res->dir_attr);
- return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EAGAIN;
+out_truncated:
+ dprintk("NFS: directory entry contains invalid file handle\n");
+ *entry = old;
+ return -EAGAIN;
}
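A minimal sketch of a consumer, assuming the xdr_stream has already been set up over the cached directory pages, roughly as nfs_readdir() is described to do; sketch_walk_entries() is a hypothetical name, not part of the patch:

	static int sketch_walk_entries(struct xdr_stream *xdr,
				       struct nfs_entry *entry, int plus)
	{
		int status;

		for (;;) {
			status = nfs3_decode_dirent(xdr, entry, plus);
			if (status == -EAGAIN)
				break;	/* no more complete entries here */
			if (status == -EBADCOOKIE && entry->eof)
				break;	/* true end of directory */
			if (status < 0)
				return status;
			/* consume *entry here */
		}
		return 0;
	}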
/*
- * Decode RENAME reply
+ * 3.3.16 READDIR3res
+ *
+ * struct dirlist3 {
+ * entry3 *entries;
+ * bool eof;
+ * };
+ *
+ * struct READDIR3resok {
+ * post_op_attr dir_attributes;
+ * cookieverf3 cookieverf;
+ * dirlist3 reply;
+ * };
+ *
+ * struct READDIR3resfail {
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * union READDIR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READDIR3resok resok;
+ * default:
+ * READDIR3resfail resfail;
+ * };
+ *
+ * Read the directory contents into the page cache, but otherwise
+ * don't touch them. The actual decoding is done by nfs3_decode_dirent()
+ * during subsequent nfs_readdir() calls.
*/
-static int
-nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
+static int decode_dirlist3(struct xdr_stream *xdr)
{
- int status;
+ u32 recvd, pglen;
+ size_t hdrlen;
- if ((status = ntohl(*p++)) != 0)
- status = nfs_stat_to_errno(status);
- p = xdr_decode_wcc_data(p, res->old_fattr);
- p = xdr_decode_wcc_data(p, res->new_fattr);
- return status;
+ pglen = xdr->buf->page_len;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(pglen > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, pglen);
+ return pglen;
+out_cheating:
+ dprintk("NFS: server cheating in readdir result: "
+ "pglen %u > recvd %u\n", pglen, recvd);
+ pglen = recvd;
+ goto out;
}
-/*
- * Decode LINK reply
- */
-static int
-nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
+static int decode_readdir3resok(struct xdr_stream *xdr,
+ struct nfs3_readdirres *result)
{
- int status;
+ int error;
+
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ /* XXX: do we need to check if result->verf != NULL ? */
+ error = decode_cookieverf3(xdr, result->verf);
+ if (unlikely(error))
+ goto out;
+ error = decode_dirlist3(xdr);
+out:
+ return error;
+}
- if ((status = ntohl(*p++)) != 0)
- status = nfs_stat_to_errno(status);
- p = xdr_decode_post_op_attr(p, res->fattr);
- p = xdr_decode_wcc_data(p, res->dir_attr);
- return status;
+static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_readdirres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_readdir3resok(xdr, result);
+out:
+ return error;
+out_default:
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
}
/*
- * Decode FSSTAT reply
+ * 3.3.18 FSSTAT3res
+ *
+ * struct FSSTAT3resok {
+ * post_op_attr obj_attributes;
+ * size3 tbytes;
+ * size3 fbytes;
+ * size3 abytes;
+ * size3 tfiles;
+ * size3 ffiles;
+ * size3 afiles;
+ * uint32 invarsec;
+ * };
+ *
+ * struct FSSTAT3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union FSSTAT3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * FSSTAT3resok resok;
+ * default:
+ * FSSTAT3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
+static int decode_fsstat3resok(struct xdr_stream *xdr,
+ struct nfs_fsstat *result)
{
- int status;
-
- status = ntohl(*p++);
-
- p = xdr_decode_post_op_attr(p, res->fattr);
- if (status != 0)
- return nfs_stat_to_errno(status);
-
- p = xdr_decode_hyper(p, &res->tbytes);
- p = xdr_decode_hyper(p, &res->fbytes);
- p = xdr_decode_hyper(p, &res->abytes);
- p = xdr_decode_hyper(p, &res->tfiles);
- p = xdr_decode_hyper(p, &res->ffiles);
- p = xdr_decode_hyper(p, &res->afiles);
+ __be32 *p;
+ p = xdr_inline_decode(xdr, 8 * 6 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ p = xdr_decode_size3(p, &result->tbytes);
+ p = xdr_decode_size3(p, &result->fbytes);
+ p = xdr_decode_size3(p, &result->abytes);
+ p = xdr_decode_size3(p, &result->tfiles);
+ p = xdr_decode_size3(p, &result->ffiles);
+ xdr_decode_size3(p, &result->afiles);
/* ignore invarsec */
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsstat *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_fsstat3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode FSINFO reply
+ * 3.3.19 FSINFO3res
+ *
+ * struct FSINFO3resok {
+ * post_op_attr obj_attributes;
+ * uint32 rtmax;
+ * uint32 rtpref;
+ * uint32 rtmult;
+ * uint32 wtmax;
+ * uint32 wtpref;
+ * uint32 wtmult;
+ * uint32 dtpref;
+ * size3 maxfilesize;
+ * nfstime3 time_delta;
+ * uint32 properties;
+ * };
+ *
+ * struct FSINFO3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union FSINFO3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * FSINFO3resok resok;
+ * default:
+ * FSINFO3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
+static int decode_fsinfo3resok(struct xdr_stream *xdr,
+ struct nfs_fsinfo *result)
{
- int status;
-
- status = ntohl(*p++);
-
- p = xdr_decode_post_op_attr(p, res->fattr);
- if (status != 0)
- return nfs_stat_to_errno(status);
+ __be32 *p;
- res->rtmax = ntohl(*p++);
- res->rtpref = ntohl(*p++);
- res->rtmult = ntohl(*p++);
- res->wtmax = ntohl(*p++);
- res->wtpref = ntohl(*p++);
- res->wtmult = ntohl(*p++);
- res->dtpref = ntohl(*p++);
- p = xdr_decode_hyper(p, &res->maxfilesize);
- p = xdr_decode_time3(p, &res->time_delta);
+ p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->rtmax = be32_to_cpup(p++);
+ result->rtpref = be32_to_cpup(p++);
+ result->rtmult = be32_to_cpup(p++);
+ result->wtmax = be32_to_cpup(p++);
+ result->wtpref = be32_to_cpup(p++);
+ result->wtmult = be32_to_cpup(p++);
+ result->dtpref = be32_to_cpup(p++);
+ p = xdr_decode_size3(p, &result->maxfilesize);
+ xdr_decode_nfstime3(p, &result->time_delta);
/* ignore properties */
- res->lease_time = 0;
+ result->lease_time = 0;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsinfo *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_fsinfo3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode PATHCONF reply
+ * 3.3.20 PATHCONF3res
+ *
+ * struct PATHCONF3resok {
+ * post_op_attr obj_attributes;
+ * uint32 linkmax;
+ * uint32 name_max;
+ * bool no_trunc;
+ * bool chown_restricted;
+ * bool case_insensitive;
+ * bool case_preserving;
+ * };
+ *
+ * struct PATHCONF3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union PATHCONF3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * PATHCONF3resok resok;
+ * default:
+ * PATHCONF3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
+static int decode_pathconf3resok(struct xdr_stream *xdr,
+ struct nfs_pathconf *result)
{
- int status;
-
- status = ntohl(*p++);
-
- p = xdr_decode_post_op_attr(p, res->fattr);
- if (status != 0)
- return nfs_stat_to_errno(status);
- res->max_link = ntohl(*p++);
- res->max_namelen = ntohl(*p++);
+ __be32 *p;
+ p = xdr_inline_decode(xdr, 4 * 6);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->max_link = be32_to_cpup(p++);
+ result->max_namelen = be32_to_cpup(p);
/* ignore remaining fields */
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_pathconf *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_pathconf3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
/*
- * Decode COMMIT reply
+ * 3.3.21 COMMIT3res
+ *
+ * struct COMMIT3resok {
+ * wcc_data file_wcc;
+ * writeverf3 verf;
+ * };
+ *
+ * struct COMMIT3resfail {
+ * wcc_data file_wcc;
+ * };
+ *
+ * union COMMIT3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * COMMIT3resok resok;
+ * default:
+ * COMMIT3resfail resfail;
+ * };
*/
-static int
-nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_writeres *result)
{
- int status;
-
- status = ntohl(*p++);
- p = xdr_decode_wcc_data(p, res->fattr);
- if (status != 0)
- return nfs_stat_to_errno(status);
-
- res->verf->verifier[0] = *p++;
- res->verf->verifier[1] = *p++;
- return 0;
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_writeverf3(xdr, result->verf->verifier);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
}
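The FSINFO, PATHCONF, and COMMIT decoders above all follow the discriminated-union shape their comments spell out: decode the nfsstat3 discriminant, decode whatever both arms carry (post_op_attr or wcc_data), then either decode the resok body or map the status to an errno. A compilable miniature of that control flow; the cursor type and errno choices are illustrative:

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <arpa/inet.h>

struct cursor { const unsigned char *p, *end; };

static int get_u32(struct cursor *c, uint32_t *v)
{
	if (c->end - c->p < 4)
		return -EIO;		/* the "out_overflow" path */
	memcpy(v, c->p, 4);
	*v = ntohl(*v);
	c->p += 4;
	return 0;
}

/* status, then a field common to both arms, then the resok-only body */
static int decode_union_reply(struct cursor *c, uint32_t *common,
			      uint32_t *body)
{
	uint32_t status;
	int error;

	error = get_u32(c, &status);
	if (error)
		return error;
	error = get_u32(c, common);	/* decoded on success and failure */
	if (error)
		return error;
	if (status != 0)
		return -EREMOTEIO;	/* stand-in for nfs_stat_to_errno() */
	return get_u32(c, body);
}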
#ifdef CONFIG_NFS_V3_ACL
-/*
- * Decode GETACL reply
- */
-static int
-nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
- struct nfs3_getaclres *res)
+
+static inline int decode_getacl3resok(struct xdr_stream *xdr,
+ struct nfs3_getaclres *result)
{
- struct xdr_buf *buf = &req->rq_rcv_buf;
- int status = ntohl(*p++);
struct posix_acl **acl;
unsigned int *aclcnt;
- int err, base;
-
- if (status != 0)
- return nfs_stat_to_errno(status);
- p = xdr_decode_post_op_attr(p, res->fattr);
- res->mask = ntohl(*p++);
- if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
- return -EINVAL;
- base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base;
-
- acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL;
- aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL;
- err = nfsacl_decode(buf, base, aclcnt, acl);
-
- acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL;
- aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL;
- if (err > 0)
- err = nfsacl_decode(buf, base + err, aclcnt, acl);
- return (err > 0) ? 0 : err;
+ size_t hdrlen;
+ int error;
+
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_uint32(xdr, &result->mask);
+ if (unlikely(error))
+ goto out;
+ error = -EINVAL;
+ if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+ goto out;
+
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+
+ acl = NULL;
+ if (result->mask & NFS_ACL)
+ acl = &result->acl_access;
+ aclcnt = NULL;
+ if (result->mask & NFS_ACLCNT)
+ aclcnt = &result->acl_access_count;
+ error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
+ if (unlikely(error <= 0))
+ goto out;
+
+ acl = NULL;
+ if (result->mask & NFS_DFACL)
+ acl = &result->acl_default;
+ aclcnt = NULL;
+ if (result->mask & NFS_DFACLCNT)
+ aclcnt = &result->acl_default_count;
+ error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
+ if (unlikely(error <= 0))
+ return error;
+ error = 0;
+out:
+ return error;
}
-/*
- * Decode setacl reply.
- */
-static int
-nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_getaclres *result)
{
- int status = ntohl(*p++);
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_getacl3resok(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
- if (status)
- return nfs_stat_to_errno(status);
- xdr_decode_post_op_attr(p, fattr);
- return 0;
+static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_post_op_attr(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
}
+
#endif /* CONFIG_NFS_V3_ACL */
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
- .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \
- .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \
- .p_arglen = NFS3_##argtype##_sz, \
- .p_replen = NFS3_##restype##_sz, \
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
+ .p_arglen = NFS3_##argtype##args_sz, \
+ .p_replen = NFS3_##restype##res_sz, \
.p_timer = timer, \
.p_statidx = NFS3PROC_##proc, \
.p_name = #proc, \
}
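The reworked PROC() macro pastes the argtype and restype stems into the new nfs3_xdr_enc_*3args/nfs3_xdr_dec_*3res names, which is what lets the table below write entries such as PROC(RMDIR, lookup, setattr, 0): RMDIR reuses the LOOKUP argument encoder and the SETATTR (wcc_data) result decoder. A self-contained miniature of the token-pasting trick, with made-up handler names:

#include <stdio.h>

static void enc_getattr(void) { }
static void dec_getattr(void) { }
static void enc_lookup(void)  { }
static void dec_setattr(void) { }

struct procinfo {
	void (*p_encode)(void);
	void (*p_decode)(void);
	const char *p_name;
};

/* ## pastes the stems into identifiers; # stringifies the proc name */
#define PROC(proc, argtype, restype)		\
	{ .p_encode = enc_##argtype,		\
	  .p_decode = dec_##restype,		\
	  .p_name   = #proc }

static const struct procinfo procedures[] = {
	PROC(GETATTR, getattr, getattr),
	PROC(RMDIR,   lookup,  setattr),   /* mix-and-match, as in the patch */
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(procedures)/sizeof(procedures[0]); i++)
		printf("%s\n", procedures[i].p_name);
	return 0;
}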
struct rpc_procinfo nfs3_procedures[] = {
- PROC(GETATTR, fhandle, attrstat, 1),
- PROC(SETATTR, sattrargs, wccstat, 0),
- PROC(LOOKUP, diropargs, lookupres, 2),
- PROC(ACCESS, accessargs, accessres, 1),
- PROC(READLINK, readlinkargs, readlinkres, 3),
- PROC(READ, readargs, readres, 3),
- PROC(WRITE, writeargs, writeres, 4),
- PROC(CREATE, createargs, createres, 0),
- PROC(MKDIR, mkdirargs, createres, 0),
- PROC(SYMLINK, symlinkargs, createres, 0),
- PROC(MKNOD, mknodargs, createres, 0),
- PROC(REMOVE, removeargs, removeres, 0),
- PROC(RMDIR, diropargs, wccstat, 0),
- PROC(RENAME, renameargs, renameres, 0),
- PROC(LINK, linkargs, linkres, 0),
- PROC(READDIR, readdirargs, readdirres, 3),
- PROC(READDIRPLUS, readdirargs, readdirres, 3),
- PROC(FSSTAT, fhandle, fsstatres, 0),
- PROC(FSINFO, fhandle, fsinfores, 0),
- PROC(PATHCONF, fhandle, pathconfres, 0),
- PROC(COMMIT, commitargs, commitres, 5),
+ PROC(GETATTR, getattr, getattr, 1),
+ PROC(SETATTR, setattr, setattr, 0),
+ PROC(LOOKUP, lookup, lookup, 2),
+ PROC(ACCESS, access, access, 1),
+ PROC(READLINK, readlink, readlink, 3),
+ PROC(READ, read, read, 3),
+ PROC(WRITE, write, write, 4),
+ PROC(CREATE, create, create, 0),
+ PROC(MKDIR, mkdir, create, 0),
+ PROC(SYMLINK, symlink, create, 0),
+ PROC(MKNOD, mknod, create, 0),
+ PROC(REMOVE, remove, remove, 0),
+ PROC(RMDIR, lookup, setattr, 0),
+ PROC(RENAME, rename, rename, 0),
+ PROC(LINK, link, link, 0),
+ PROC(READDIR, readdir, readdir, 3),
+ PROC(READDIRPLUS, readdirplus, readdir, 3),
+ PROC(FSSTAT, getattr, fsstat, 0),
+ PROC(FSINFO, getattr, fsinfo, 0),
+ PROC(PATHCONF, getattr, pathconf, 0),
+ PROC(COMMIT, commit, commit, 5),
};
struct rpc_version nfs_version3 = {
@@ -1185,8 +2468,8 @@ struct rpc_version nfs_version3 = {
static struct rpc_procinfo nfs3_acl_procedures[] = {
[ACLPROC3_GETACL] = {
.p_proc = ACLPROC3_GETACL,
- .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
- .p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
.p_arglen = ACL3_getaclargs_sz,
.p_replen = ACL3_getaclres_sz,
.p_timer = 1,
@@ -1194,8 +2477,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
- .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
- .p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
.p_arglen = ACL3_setaclargs_sz,
.p_replen = ACL3_setaclres_sz,
.p_timer = 0,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9fa496387fd..7a747407314 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
+ NFS4CLNT_LAYOUTRECALL,
NFS4CLNT_SESSION_RESET,
NFS4CLNT_RECALL_SLOT,
};
@@ -109,7 +110,7 @@ struct nfs_unique_id {
struct nfs4_state_owner {
struct nfs_unique_id so_owner_id;
struct nfs_server *so_server;
- struct rb_node so_client_node;
+ struct rb_node so_server_node;
struct rpc_cred *so_cred; /* Associated cred */
@@ -227,12 +228,6 @@ struct nfs4_state_maintenance_ops {
extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;
-/* inode.c */
-extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
-
-
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -241,11 +236,12 @@ extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
+extern const struct xattr_handler *nfs4_xattr_handlers[];
#if defined(CONFIG_NFS_V4_1)
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -331,7 +327,6 @@ extern void nfs_free_seqid(struct nfs_seqid *seqid);
extern const nfs4_stateid zero_stateid;
/* nfs4xdr.c */
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
extern struct rpc_procinfo nfs4_procedures[];
struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e92f0d8d65..23f930caf1e 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,7 +82,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
{
struct nfs4_file_layout_dsaddr *dsaddr;
int status = -EINVAL;
- struct nfs_server *nfss = NFS_SERVER(lo->inode);
+ struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
dprintk("--> %s\n", __func__);
@@ -101,7 +101,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
/* find and reference the deviceid */
dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
if (dsaddr == NULL) {
- dsaddr = get_device_info(lo->inode, id);
+ dsaddr = get_device_info(lo->plh_inode, id);
if (dsaddr == NULL)
goto out;
}
@@ -243,7 +243,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
- struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+ struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4435e5e1f90..9d992b0346e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -49,6 +49,7 @@
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/sunrpc/bc_xprt.h>
+#include <linux/xattr.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -355,9 +356,9 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
}
/*
- * Signal state manager thread if session is drained
+ * Signal state manager thread if session fore channel is drained
*/
-static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
+static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
struct rpc_task *task;
@@ -371,8 +372,20 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
if (ses->fc_slot_table.highest_used_slotid != -1)
return;
- dprintk("%s COMPLETE: Session Drained\n", __func__);
- complete(&ses->complete);
+ dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
+ complete(&ses->fc_slot_table.complete);
+}
+
+/*
+ * Signal state manager thread if session back channel is drained
+ */
+void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
+{
+ if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
+ ses->bc_slot_table.highest_used_slotid != -1)
+ return;
+ dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
+ complete(&ses->bc_slot_table.complete);
}
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
@@ -389,7 +402,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slot);
- nfs41_check_drain_session_complete(res->sr_session);
+ nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
}
@@ -1826,6 +1839,8 @@ struct nfs4_closedata {
struct nfs_closeres res;
struct nfs_fattr fattr;
unsigned long timestamp;
+ bool roc;
+ u32 roc_barrier;
};
static void nfs4_free_closedata(void *data)
@@ -1833,6 +1848,8 @@ static void nfs4_free_closedata(void *data)
struct nfs4_closedata *calldata = data;
struct nfs4_state_owner *sp = calldata->state->owner;
+ if (calldata->roc)
+ pnfs_roc_release(calldata->state->inode);
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
@@ -1865,6 +1882,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
*/
switch (task->tk_status) {
case 0:
+ if (calldata->roc)
+ pnfs_roc_set_barrier(state->inode,
+ calldata->roc_barrier);
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
nfs4_close_clear_stateid_flags(state,
@@ -1917,8 +1937,15 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
return;
}
- if (calldata->arg.fmode == 0)
+ if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+ if (calldata->roc &&
+ pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
+ rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
+ task, NULL);
+ return;
+ }
+ }
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
@@ -1946,7 +1973,7 @@ static const struct rpc_call_ops nfs4_close_ops = {
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
-int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
@@ -1981,11 +2008,12 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
+ calldata->roc = roc;
path_get(path);
calldata->path = *path;
- msg.rpc_argp = &calldata->arg,
- msg.rpc_resp = &calldata->res,
+ msg.rpc_argp = &calldata->arg;
+ msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -1998,6 +2026,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i
out_free_calldata:
kfree(calldata);
out:
+ if (roc)
+ pnfs_roc_release(state->inode);
nfs4_put_open_state(state);
nfs4_put_state_owner(sp);
return status;
@@ -2486,6 +2516,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
path = &ctx->path;
fmode = ctx->mode;
}
+ sattr->ia_mode &= ~current_umask();
state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
d_drop(dentry);
if (IS_ERR(state)) {
@@ -2816,6 +2847,8 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
{
struct nfs4_exception exception = { };
int err;
+
+ sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mkdir(dir, dentry, sattr),
@@ -2916,6 +2949,8 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
{
struct nfs4_exception exception = { };
int err;
+
+ sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mknod(dir, dentry, sattr, rdev),
@@ -3478,6 +3513,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
struct nfs4_setclientid setclientid = {
.sc_verifier = &sc_verifier,
.sc_prog = program,
+ .sc_cb_ident = clp->cl_cb_ident,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -3517,7 +3553,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
if (signalled())
break;
if (loop++ & 1)
- ssleep(clp->cl_lease_time + 1);
+ ssleep(clp->cl_lease_time / HZ + 1);
else
if (++clp->cl_id_uniquifier == 0)
break;
@@ -3663,8 +3699,8 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
data->rpc_status = 0;
task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args,
- msg.rpc_resp = &data->res,
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -3743,6 +3779,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
+ arg.lock_owner.s_dev = server->s_dev;
status = nfs4_call_sync(server, &msg, &arg, &res, 1);
switch (status) {
case 0:
@@ -3908,8 +3945,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
return ERR_PTR(-ENOMEM);
}
- msg.rpc_argp = &data->arg,
- msg.rpc_resp = &data->res,
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
return rpc_run_task(&task_setup_data);
}
@@ -3988,6 +4025,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
+ p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
@@ -4145,8 +4183,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
data->arg.reclaim = NFS_LOCK_RECLAIM;
task_setup_data.callback_ops = &nfs4_recover_lock_ops;
}
- msg.rpc_argp = &data->arg,
- msg.rpc_resp = &data->res,
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -4392,48 +4430,43 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
return;
args->lock_owner.clientid = server->nfs_client->cl_clientid;
args->lock_owner.id = lsp->ls_id.id;
+ args->lock_owner.s_dev = server->s_dev;
msg.rpc_argp = args;
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
}
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
-int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
- size_t buflen, int flags)
+static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
+ const void *buf, size_t buflen,
+ int flags, int type)
{
- struct inode *inode = dentry->d_inode;
-
- if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
- return -EOPNOTSUPP;
+ if (strcmp(key, "") != 0)
+ return -EINVAL;
- return nfs4_proc_set_acl(inode, buf, buflen);
+ return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}
-/* The getxattr man page suggests returning -ENODATA for unknown attributes,
- * and that's what we'll do for e.g. user attributes that haven't been set.
- * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
- * attributes in kernel-managed attribute namespaces. */
-ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
- size_t buflen)
+static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
+ void *buf, size_t buflen, int type)
{
- struct inode *inode = dentry->d_inode;
-
- if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
- return -EOPNOTSUPP;
+ if (strcmp(key, "") != 0)
+ return -EINVAL;
- return nfs4_proc_get_acl(inode, buf, buflen);
+ return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}
-ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
+static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
+ size_t list_len, const char *name,
+ size_t name_len, int type)
{
- size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
+ size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
return 0;
- if (buf && buflen < len)
- return -ERANGE;
- if (buf)
- memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
+
+ if (list && len <= list_len)
+ memcpy(list, XATTR_NAME_NFSV4_ACL, len);
return len;
}
@@ -4486,6 +4519,25 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
#ifdef CONFIG_NFS_V4_1
/*
+ * Check the exchange flags returned by the server for invalid flags: flags
+ * outside the valid mask, both PNFS and NON_PNFS set, or none of NON_PNFS,
+ * PNFS, or DS set.
+ */
+static int nfs4_check_cl_exchange_flags(u32 flags)
+{
+ if (flags & ~EXCHGID4_FLAG_MASK_R)
+ goto out_inval;
+ if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+ (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+ goto out_inval;
+ if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
+ goto out_inval;
+ return NFS_OK;
+out_inval:
+ return -NFS4ERR_INVAL;
+}
+
+/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
@@ -4498,7 +4550,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
.client = clp,
- .flags = clp->cl_exchange_flags,
+ .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
.client = clp,
@@ -4515,9 +4567,6 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
- /* Remove server-only flags */
- args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
-
p = (u32 *)verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4543,6 +4592,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
break;
}
+ status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
@@ -4776,17 +4826,17 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
if (!session)
return NULL;
- init_completion(&session->complete);
-
tbl = &session->fc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+ init_completion(&tbl->complete);
tbl = &session->bc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+ init_completion(&tbl->complete);
session->session_state = 1<<NFS4_SESSION_INITING;
@@ -5280,13 +5330,23 @@ static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
- struct inode *ino = lgp->args.inode;
- struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
+	/* Note there is a race here: a CB_LAYOUTRECALL can come in
+	 * right now, covering the LAYOUTGET we are about to send.
+	 * However, that is not catastrophic, and there seems
+	 * to be no way to prevent it completely.
+ */
if (nfs4_setup_sequence(server, &lgp->args.seq_args,
&lgp->res.seq_res, 0, task))
return;
+ if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
+ NFS_I(lgp->args.inode)->layout,
+ lgp->args.ctx->state)) {
+ rpc_exit(task, NFS4_OK);
+ return;
+ }
rpc_call_start(task);
}
@@ -5313,7 +5373,6 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
return;
}
}
- lgp->status = task->tk_status;
dprintk("<-- %s\n", __func__);
}
@@ -5322,7 +5381,6 @@ static void nfs4_layoutget_release(void *calldata)
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
- put_layout_hdr(lgp->args.inode);
if (lgp->res.layout.buf != NULL)
free_page((unsigned long) lgp->res.layout.buf);
put_nfs_open_context(lgp->args.ctx);
@@ -5367,13 +5425,10 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
- if (status != 0)
- goto out;
- status = lgp->status;
- if (status != 0)
- goto out;
- status = pnfs_layout_process(lgp);
-out:
+ if (status == 0)
+ status = task->tk_status;
+ if (status == 0)
+ status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@@ -5504,9 +5559,10 @@ static const struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
- .getxattr = nfs4_getxattr,
- .setxattr = nfs4_setxattr,
- .listxattr = nfs4_listxattr,
+ .getxattr = generic_getxattr,
+ .setxattr = generic_setxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
};
const struct nfs_rpc_ops nfs_v4_clientops = {
@@ -5551,6 +5607,18 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.open_context = nfs4_atomic_open,
};
+static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
+ .prefix = XATTR_NAME_NFSV4_ACL,
+ .list = nfs4_xattr_list_nfs4_acl,
+ .get = nfs4_xattr_get_nfs4_acl,
+ .set = nfs4_xattr_set_nfs4_acl,
+};
+
+const struct xattr_handler *nfs4_xattr_handlers[] = {
+ &nfs4_xattr_nfs4_acl_handler,
+ NULL
+};
+
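With the move to generic_getxattr() and friends, the VFS matches each handler's .prefix against the attribute name and hands the handler only the remainder, which is why the get/set handlers above reject any non-empty key: "system.nfs4_acl" is an exact name, not a namespace. A user-space sketch of that prefix dispatch; the table and return codes are illustrative:

#include <stdio.h>
#include <string.h>

struct handler {
	const char *prefix;
	int (*get)(const char *rest);
};

static int get_acl(const char *rest)
{
	/* exact-name attribute: anything after the prefix is an error */
	if (strcmp(rest, "") != 0)
		return -1;		/* -EINVAL in the kernel handlers */
	printf("fetch ACL\n");
	return 0;
}

static const struct handler acl_handler = { "system.nfs4_acl", get_acl };
static const struct handler *handlers[] = { &acl_handler, NULL };

/* Mirror of the generic_getxattr() walk: match prefix, pass the tail. */
static int dispatch_get(const char *name)
{
	for (const struct handler **h = handlers; *h; h++) {
		size_t n = strlen((*h)->prefix);
		if (strncmp(name, (*h)->prefix, n) == 0)
			return (*h)->get(name + n);
	}
	return -1;			/* -EOPNOTSUPP in the kernel */
}

int main(void)
{
	return dispatch_get("system.nfs4_acl");
}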
/*
* Local variables:
* c-basic-offset: 8
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c580af1..402143d75fc 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@ nfs4_renew_state(struct work_struct *work)
ops = clp->cl_mvops->state_renewal_ops;
dprintk("%s: start\n", __func__);
- /* Are there any active superblocks? */
- if (list_empty(&clp->cl_superblocks))
+
+ rcu_read_lock();
+ if (list_empty(&clp->cl_superblocks)) {
+ rcu_read_unlock();
goto out;
+ }
+ rcu_read_unlock();
+
spin_lock(&clp->cl_lock);
lease = clp->cl_lease_time;
last = clp->cl_last_renewal;
@@ -75,7 +80,7 @@ nfs4_renew_state(struct work_struct *work)
cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
- if (list_empty(&clp->cl_delegations)) {
+ if (!nfs_delegations_present(clp)) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
goto out;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a312673..2336d532cf6 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
put_rpccred(cred);
}
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
+ struct rpc_cred *cred = NULL;
struct nfs4_state_owner *sp;
struct rb_node *pos;
- struct rpc_cred *cred = NULL;
- for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
- sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
if (list_empty(&sp->so_states))
continue;
cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
return cred;
}
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+ struct rpc_cred *cred = NULL;
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ cred = nfs4_get_renew_cred_server_locked(server);
+ if (cred != NULL)
+ break;
+ }
+ rcu_read_unlock();
+ return cred;
+}
+
#if defined(CONFIG_NFS_V4_1)
static int nfs41_setup_state_renewal(struct nfs_client *clp)
@@ -142,6 +167,11 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
return status;
}
+/*
+ * The back channel returns NFS4ERR_DELAY for new requests while
+ * NFS4_SESSION_DRAINING is set, so there is no work to be done when
+ * draining ends.
+ */
static void nfs4_end_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
@@ -165,22 +195,32 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
}
}
-static int nfs4_begin_drain_session(struct nfs_client *clp)
+static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
{
- struct nfs4_session *ses = clp->cl_session;
- struct nfs4_slot_table *tbl = &ses->fc_slot_table;
-
spin_lock(&tbl->slot_tbl_lock);
- set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
if (tbl->highest_used_slotid != -1) {
- INIT_COMPLETION(ses->complete);
+ INIT_COMPLETION(tbl->complete);
spin_unlock(&tbl->slot_tbl_lock);
- return wait_for_completion_interruptible(&ses->complete);
+ return wait_for_completion_interruptible(&tbl->complete);
}
spin_unlock(&tbl->slot_tbl_lock);
return 0;
}
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+ struct nfs4_session *ses = clp->cl_session;
+ int ret = 0;
+
+ set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
+ /* back channel */
+ ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+ if (ret)
+ return ret;
+ /* fore channel */
+ return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+}
+
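Splitting the completion into per-slot-table instances lets nfs4_begin_drain_session() drain each channel independently: set NFS4_SESSION_DRAINING, then block on a completion that the slot-free path fires once highest_used_slotid falls back to -1, back channel first, then fore channel. A pthread sketch of the same set-flag-then-wait-for-empty handshake; the counter stands in for the slot bitmap, and initialization is omitted:

#include <pthread.h>
#include <stdbool.h>

struct slot_table {
	pthread_mutex_t lock;
	pthread_cond_t  drained;	/* plays the role of tbl->complete */
	int             in_use;		/* stand-in for highest_used_slotid */
	bool            draining;
};

/* Called by the I/O path when it releases a slot. */
static void put_slot(struct slot_table *t)
{
	pthread_mutex_lock(&t->lock);
	if (--t->in_use == 0 && t->draining)
		pthread_cond_signal(&t->drained);  /* complete(&tbl->complete) */
	pthread_mutex_unlock(&t->lock);
}

/* Called by the state manager: block until the table is empty. */
static void wait_on_slot_tbl(struct slot_table *t)
{
	pthread_mutex_lock(&t->lock);
	t->draining = true;
	while (t->in_use != 0)
		pthread_cond_wait(&t->drained, &t->lock);
	pthread_mutex_unlock(&t->lock);
}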
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
int status;
@@ -192,6 +232,12 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
status = nfs4_proc_create_session(clp);
if (status != 0)
goto out;
+ status = nfs4_set_callback_sessionid(clp);
+ if (status != 0) {
+ printk(KERN_WARNING "Sessionid not set. No callback service\n");
+ nfs_callback_down(1);
+ status = 0;
+ }
nfs41_setup_state_renewal(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
@@ -210,28 +256,56 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
#endif /* CONFIG_NFS_V4_1 */
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server(struct nfs_server *server)
{
+ struct nfs_client *clp = server->nfs_client;
+ struct rpc_cred *cred = NULL;
struct nfs4_state_owner *sp;
struct rb_node *pos;
+
+ spin_lock(&clp->cl_lock);
+ pos = rb_first(&server->state_owners);
+ if (pos != NULL) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+ cred = get_rpccred(sp->so_cred);
+ }
+ spin_unlock(&clp->cl_lock);
+ return cred;
+}
+
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+ struct nfs_server *server;
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
cred = nfs4_get_machine_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
if (cred != NULL)
goto out;
- pos = rb_first(&clp->cl_state_owners);
- if (pos != NULL) {
- sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
- cred = get_rpccred(sp->so_cred);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ cred = nfs4_get_setclientid_cred_server(server);
+ if (cred != NULL)
+ break;
}
+ rcu_read_unlock();
+
out:
- spin_unlock(&clp->cl_lock);
return cred;
}
-static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
- __u64 minval, int maxbits)
+static void nfs_alloc_unique_id_locked(struct rb_root *root,
+ struct nfs_unique_id *new,
+ __u64 minval, int maxbits)
{
struct rb_node **p, *parent;
struct nfs_unique_id *pos;
@@ -286,16 +360,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
}
static struct nfs4_state_owner *
-nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
{
- struct nfs_client *clp = server->nfs_client;
- struct rb_node **p = &clp->cl_state_owners.rb_node,
+ struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp, *res = NULL;
while (*p != NULL) {
parent = *p;
- sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+ sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
if (server < sp->so_server) {
p = &parent->rb_left;
@@ -319,24 +392,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
}
static struct nfs4_state_owner *
-nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
{
- struct rb_node **p = &clp->cl_state_owners.rb_node,
+ struct nfs_server *server = new->so_server;
+ struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
while (*p != NULL) {
parent = *p;
- sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+ sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
- if (new->so_server < sp->so_server) {
- p = &parent->rb_left;
- continue;
- }
- if (new->so_server > sp->so_server) {
- p = &parent->rb_right;
- continue;
- }
if (new->so_cred < sp->so_cred)
p = &parent->rb_left;
else if (new->so_cred > sp->so_cred)
@@ -346,18 +412,21 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
return sp;
}
}
- nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
- rb_link_node(&new->so_client_node, parent, p);
- rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+ nfs_alloc_unique_id_locked(&server->openowner_id,
+ &new->so_owner_id, 1, 64);
+ rb_link_node(&new->so_server_node, parent, p);
+ rb_insert_color(&new->so_server_node, &server->state_owners);
return new;
}
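Because each tree is now per-server, nfs4_insert_state_owner_locked() no longer needs the so_server comparisons the deleted lines carried; the credential pointer alone orders the tree. A standalone sketch of the same insert-or-return-existing walk, over a plain binary search tree rather than an rb-tree (the node type is illustrative; the kernel rebalances via rb_insert_color()):

#include <stdint.h>
#include <stdlib.h>

struct owner {
	const void   *cred;	/* key, ordered by address as in the patch */
	struct owner *left, *right;
};

/* Insert @new unless an owner with the same cred is already present;
 * either way, return the node that ends up in the tree.
 */
static struct owner *insert_owner(struct owner **root, struct owner *new)
{
	struct owner **p = root;

	while (*p != NULL) {
		if ((uintptr_t)new->cred < (uintptr_t)(*p)->cred)
			p = &(*p)->left;
		else if ((uintptr_t)new->cred > (uintptr_t)(*p)->cred)
			p = &(*p)->right;
		else
			return *p;	/* duplicate: caller reuses it */
	}
	*p = new;
	return new;
}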
static void
-nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
{
- if (!RB_EMPTY_NODE(&sp->so_client_node))
- rb_erase(&sp->so_client_node, &clp->cl_state_owners);
- nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+ struct nfs_server *server = sp->so_server;
+
+ if (!RB_EMPTY_NODE(&sp->so_server_node))
+ rb_erase(&sp->so_server_node, &server->state_owners);
+ nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
}
/*
@@ -386,23 +455,32 @@ nfs4_alloc_state_owner(void)
static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
- if (!RB_EMPTY_NODE(&sp->so_client_node)) {
- struct nfs_client *clp = sp->so_server->nfs_client;
+ if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+ struct nfs_server *server = sp->so_server;
+ struct nfs_client *clp = server->nfs_client;
spin_lock(&clp->cl_lock);
- rb_erase(&sp->so_client_node, &clp->cl_state_owners);
- RB_CLEAR_NODE(&sp->so_client_node);
+ rb_erase(&sp->so_server_node, &server->state_owners);
+ RB_CLEAR_NODE(&sp->so_server_node);
spin_unlock(&clp->cl_lock);
}
}
-struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+ struct rpc_cred *cred)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *new;
spin_lock(&clp->cl_lock);
- sp = nfs4_find_state_owner(server, cred);
+ sp = nfs4_find_state_owner_locked(server, cred);
spin_unlock(&clp->cl_lock);
if (sp != NULL)
return sp;
@@ -412,7 +490,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
new->so_server = server;
new->so_cred = cred;
spin_lock(&clp->cl_lock);
- sp = nfs4_insert_state_owner(clp, new);
+ sp = nfs4_insert_state_owner_locked(new);
spin_unlock(&clp->cl_lock);
if (sp == new)
get_rpccred(cred);
@@ -423,6 +501,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
return sp;
}
+/**
+ * nfs4_put_state_owner - Release a nfs4_state_owner
+ * @sp: state owner data to release
+ */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +513,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
return;
- nfs4_remove_state_owner(clp, sp);
+ nfs4_remove_state_owner_locked(sp);
spin_unlock(&clp->cl_lock);
rpc_destroy_wait_queue(&sp->so_sequence.wait);
put_rpccred(cred);
@@ -585,8 +668,11 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state,
if (!call_close) {
nfs4_put_open_state(state);
nfs4_put_state_owner(owner);
- } else
- nfs4_do_close(path, state, gfp_mask, wait);
+ } else {
+ bool roc = pnfs_roc(state->inode);
+
+ nfs4_do_close(path, state, gfp_mask, wait, roc);
+ }
}
void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
@@ -633,7 +719,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
struct nfs4_lock_state *lsp;
- struct nfs_client *clp = state->owner->so_server->nfs_client;
+ struct nfs_server *server = state->owner->so_server;
+ struct nfs_client *clp = server->nfs_client;
lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
if (lsp == NULL)
@@ -657,7 +744,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
return NULL;
}
spin_lock(&clp->cl_lock);
- nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
+ nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
spin_unlock(&clp->cl_lock);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
@@ -665,10 +752,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
- struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
+ struct nfs_server *server = lsp->ls_state->owner->so_server;
+ struct nfs_client *clp = server->nfs_client;
spin_lock(&clp->cl_lock);
- nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+ nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
spin_unlock(&clp->cl_lock);
rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
kfree(lsp);
@@ -1114,15 +1202,19 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
}
}
-static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+static void nfs4_reset_seqids(struct nfs_server *server,
+ int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
+ struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp;
struct rb_node *pos;
struct nfs4_state *state;
- /* Reset all sequence ids to zero */
- for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
- sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
sp->so_seqid.flags = 0;
spin_lock(&sp->so_lock);
list_for_each_entry(state, &sp->so_states, open_states) {
@@ -1131,6 +1223,18 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_re
}
spin_unlock(&sp->so_lock);
}
+ spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+ int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs4_reset_seqids(server, mark_reclaim);
+ rcu_read_unlock();
}
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
@@ -1148,25 +1252,41 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
(void)ops->reclaim_complete(clp);
}
-static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+static void nfs4_clear_reclaim_server(struct nfs_server *server)
{
+ struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp;
struct rb_node *pos;
struct nfs4_state *state;
- if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
- return 0;
-
- for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
- sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
spin_lock(&sp->so_lock);
list_for_each_entry(state, &sp->so_states, open_states) {
- if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
+ if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+ &state->flags))
continue;
nfs4_state_mark_reclaim_nograce(clp, state);
}
spin_unlock(&sp->so_lock);
}
+ spin_unlock(&clp->cl_lock);
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+
+ if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs4_clear_reclaim_server(server);
+ rcu_read_unlock();
nfs_delegation_reap_unclaimed(clp);
return 1;
@@ -1238,27 +1358,40 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
+ struct nfs4_state_owner *sp;
+ struct nfs_server *server;
struct rb_node *pos;
int status = 0;
restart:
- spin_lock(&clp->cl_lock);
- for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
- struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
- if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
- continue;
- atomic_inc(&sp->so_count);
- spin_unlock(&clp->cl_lock);
- status = nfs4_reclaim_open_state(sp, ops);
- if (status < 0) {
- set_bit(ops->owner_flag_bit, &sp->so_flags);
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos,
+ struct nfs4_state_owner, so_server_node);
+ if (!test_and_clear_bit(ops->owner_flag_bit,
+ &sp->so_flags))
+ continue;
+ atomic_inc(&sp->so_count);
+ spin_unlock(&clp->cl_lock);
+ rcu_read_unlock();
+
+ status = nfs4_reclaim_open_state(sp, ops);
+ if (status < 0) {
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+ return nfs4_recovery_handle_error(clp, status);
+ }
+
nfs4_put_state_owner(sp);
- return nfs4_recovery_handle_error(clp, status);
+ goto restart;
}
- nfs4_put_state_owner(sp);
- goto restart;
+ spin_unlock(&clp->cl_lock);
}
- spin_unlock(&clp->cl_lock);
+ rcu_read_unlock();
return status;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9f1826b012e..2ab8e5cb8f5 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -71,8 +71,8 @@ static int nfs4_stat_to_errno(int);
/* lock,open owner id:
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
-#define open_owner_id_maxsz (1 + 4)
-#define lock_owner_id_maxsz (1 + 4)
+#define open_owner_id_maxsz (1 + 1 + 4)
+#define lock_owner_id_maxsz (1 + 1 + 4)
#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@ static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lo
{
__be32 *p;
- p = reserve_space(xdr, 28);
+ p = reserve_space(xdr, 32);
p = xdr_encode_hyper(p, lowner->clientid);
- *p++ = cpu_to_be32(16);
+ *p++ = cpu_to_be32(20);
p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ *p++ = cpu_to_be32(lowner->s_dev);
xdr_encode_hyper(p, lowner->id);
}
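The 28-to-32 byte reservation and the 16-to-20 opaque length above move in lockstep: the owner opaque now carries the 8-byte "lock id:" tag, the new 4-byte s_dev, and the 8-byte id. A sketch of the resulting wire layout over a flat buffer; the helper names are illustrative:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Write one big-endian word and advance the cursor. */
static unsigned char *put_be32(unsigned char *p, uint32_t v)
{
	v = htonl(v);
	memcpy(p, &v, 4);
	return p + 4;
}

/* clientid(8) + opaque len(4) + "lock id:"(8) + s_dev(4) + id(8) = 32 bytes */
static void encode_lockowner_sketch(unsigned char *p, uint64_t clientid,
				    uint32_t s_dev, uint64_t id)
{
	p = put_be32(p, (uint32_t)(clientid >> 32));
	p = put_be32(p, (uint32_t)clientid);
	p = put_be32(p, 20);		/* opaque body is now 20 bytes */
	memcpy(p, "lock id:", 8);
	p += 8;
	p = put_be32(p, s_dev);
	p = put_be32(p, (uint32_t)(id >> 32));
	put_be32(p, (uint32_t)id);
}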
@@ -1210,10 +1211,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
*p++ = cpu_to_be32(OP_OPEN);
*p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
- p = reserve_space(xdr, 28);
+ p = reserve_space(xdr, 32);
p = xdr_encode_hyper(p, arg->clientid);
- *p++ = cpu_to_be32(16);
+ *p++ = cpu_to_be32(20);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
+ *p++ = cpu_to_be32(arg->server->s_dev);
xdr_encode_hyper(p, arg->id);
}
@@ -1510,7 +1512,7 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
hdr->replen += decode_restorefh_maxsz;
}
-static int
+static void
encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
{
__be32 *p;
@@ -1521,14 +1523,12 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
p = reserve_space(xdr, 2*4);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(FATTR4_WORD0_ACL);
- if (arg->acl_len % 4)
- return -EINVAL;
+ BUG_ON(arg->acl_len % 4);
p = reserve_space(xdr, 4);
*p = cpu_to_be32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
hdr->replen += decode_setacl_maxsz;
- return 0;
}
static void
@@ -1789,7 +1789,6 @@ encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr)
{
- nfs4_stateid stateid;
__be32 *p;
p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
@@ -1800,9 +1799,7 @@ encode_layoutget(struct xdr_stream *xdr,
p = xdr_encode_hyper(p, args->range.offset);
p = xdr_encode_hyper(p, args->range.length);
p = xdr_encode_hyper(p, args->minlength);
- pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
- args->ctx->state);
- p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+ p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
*p = cpu_to_be32(args->maxcount);
dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
@@ -1833,393 +1830,362 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
/*
* Encode an ACCESS request
*/
-static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args)
+static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_accessargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_access(&xdr, args->access, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_access(xdr, args->access, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode LOOKUP request
*/
-static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args)
+static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_lookup_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->dir_fh, &hdr);
- encode_lookup(&xdr, args->name, &hdr);
- encode_getfh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_lookup(xdr, args->name, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode LOOKUP_ROOT request
*/
-static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args)
+static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs4_lookup_root_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putrootfh(&xdr, &hdr);
- encode_getfh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode REMOVE request
*/
-static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_remove(&xdr, &args->name, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_remove(xdr, &args->name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode RENAME request
*/
-static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args)
+static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->old_dir, &hdr);
- encode_savefh(&xdr, &hdr);
- encode_putfh(&xdr, args->new_dir, &hdr);
- encode_rename(&xdr, args->old_name, args->new_name, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
- encode_restorefh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->old_dir, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_putfh(xdr, args->new_dir, &hdr);
+ encode_rename(xdr, args->old_name, args->new_name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode LINK request
*/
-static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args)
+static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_link_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_savefh(&xdr, &hdr);
- encode_putfh(&xdr, args->dir_fh, &hdr);
- encode_link(&xdr, args->name, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
- encode_restorefh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_link(xdr, args->name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode CREATE request
*/
-static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_create_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->dir_fh, &hdr);
- encode_savefh(&xdr, &hdr);
- encode_create(&xdr, args, &hdr);
- encode_getfh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
- encode_restorefh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_create(xdr, args, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode SYMLINK request
*/
-static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_create_arg *args)
{
- return nfs4_xdr_enc_create(req, p, args);
+ nfs4_xdr_enc_create(req, xdr, args);
}
/*
* Encode GETATTR request
*/
-static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args)
+static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_getattr_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a CLOSE request
*/
-static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_closeargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_close(&xdr, args, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_close(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode an OPEN request
*/
-static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_openargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_savefh(&xdr, &hdr);
- encode_open(&xdr, args, &hdr);
- encode_getfh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
- encode_restorefh(&xdr, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_open(xdr, args, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode an OPEN_CONFIRM request
*/
-static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args)
+static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_open_confirmargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 0,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_open_confirm(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open_confirm(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode an OPEN request with no attributes.
*/
-static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_openargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_open(&xdr, args, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode an OPEN_DOWNGRADE request
*/
-static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_closeargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_open_downgrade(&xdr, args, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open_downgrade(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a LOCK request
*/
-static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args)
+static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_lock_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_lock(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_lock(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a LOCKT request
*/
-static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args)
+static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_lockt_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_lockt(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_lockt(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a LOCKU request
*/
-static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args)
+static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_locku_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_locku(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_locku(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
-static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
+static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_release_lockowner_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = 0,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_release_lockowner(xdr, &args->lock_owner, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a READLINK request
*/
-static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args)
+static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_readlink *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_readlink(&xdr, args, req, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_readlink(xdr, args, req, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->pglen);
encode_nops(&hdr);
- return 0;
}
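
Note the units in the xdr_inline_pages() call above: hdr.replen is maintained in 32-bit XDR words, while xdr_inline_pages() wants a byte offset into the receive buffer, so replen << 2 converts words to bytes. Worked example (values illustrative):

	u32 replen = 20;		/* reply ops so far: 20 XDR words...  */
	size_t offset = replen << 2;	/* ...so page data starts at byte 80 */
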
/*
* Encode a READDIR request
*/
-static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args)
+static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_readdir_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_readdir(&xdr, args, req, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_readdir(xdr, args, req, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->count);
@@ -2227,428 +2193,387 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
__func__, hdr.replen << 2, args->pages,
args->pgbase, args->count);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a READ request
*/
-static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_read(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_read(xdr, args, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->pages, args->pgbase, args->count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
- return 0;
}
/*
 * Encode a SETATTR request
*/
-static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args)
+static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_setattrargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_setattr(&xdr, args, args->server, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setattr(xdr, args, args->server, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a GETACL request
*/
-static int
-nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
- struct nfs_getaclargs *args)
+static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_getaclargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
uint32_t replen;
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
- encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
+ encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
encode_nops(&hdr);
- return 0;
}
/*
* Encode a WRITE request
*/
-static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_write(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_write(xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a COMMIT request
*/
-static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_commit(&xdr, args, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_commit(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* FSINFO request
*/
-static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args)
+static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_fsinfo_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_fsinfo(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_fsinfo(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a PATHCONF request
*/
-static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args)
+static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_pathconf_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
&hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a STATFS request
*/
-static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args)
+static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_statfs_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* GETATTR_BITMAP request
*/
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
- struct nfs4_server_caps_arg *args)
+static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_server_caps_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fhandle, &hdr);
- encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fhandle, &hdr);
+ encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
FATTR4_WORD0_LINK_SUPPORT|
FATTR4_WORD0_SYMLINK_SUPPORT|
FATTR4_WORD0_ACLSUPPORT, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a RENEW request
*/
-static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_client *clp)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 0,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_renew(&xdr, clp, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_renew(xdr, clp, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a SETCLIENTID request
*/
-static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc)
+static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid *sc)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 0,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_setclientid(&xdr, sc, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_setclientid(xdr, sc, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a SETCLIENTID_CONFIRM request
*/
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
+static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid_res *arg)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 0,
};
const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_setclientid_confirm(&xdr, arg, &hdr);
- encode_putrootfh(&xdr, &hdr);
- encode_fsinfo(&xdr, lease_bitmap, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_setclientid_confirm(xdr, arg, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_fsinfo(xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* DELEGRETURN request
*/
-static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args)
+static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs4_delegreturnargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fhandle, &hdr);
- encode_delegreturn(&xdr, args->stateid, &hdr);
- encode_getfattr(&xdr, args->bitmask, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fhandle, &hdr);
+ encode_delegreturn(xdr, args->stateid, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode FS_LOCATIONS request
*/
-static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args)
+static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_fs_locations_arg *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
uint32_t replen;
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->dir_fh, &hdr);
- encode_lookup(&xdr, args->name, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_lookup(xdr, args->name, &hdr);
replen = hdr.replen; /* get the attribute into args->page */
- encode_fs_locations(&xdr, args->bitmask, &hdr);
+ encode_fs_locations(xdr, args->bitmask, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
- return 0;
}
#if defined(CONFIG_NFS_V4_1)
/*
* EXCHANGE_ID request
*/
-static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
- struct nfs41_exchange_id_args *args)
+static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_exchange_id(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_exchange_id(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a CREATE_SESSION request
*/
-static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
- struct nfs41_create_session_args *args)
+static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_create_session(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_create_session(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a DESTROY_SESSION request
*/
-static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
- struct nfs4_session *session)
+static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_session *session)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = session->clp->cl_mvops->minor_version,
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_destroy_session(&xdr, session, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_destroy_session(xdr, session, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a SEQUENCE request
*/
-static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
- struct nfs4_sequence_args *args)
+static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_sequence_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a GET_LEASE_TIME request
*/
-static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
- struct nfs4_get_lease_time_args *args)
+static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_get_lease_time_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
};
const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->la_seq_args, &hdr);
- encode_putrootfh(&xdr, &hdr);
- encode_fsinfo(&xdr, lease_bitmap, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->la_seq_args, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_fsinfo(xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* a RECLAIM_COMPLETE request
*/
-static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
- struct nfs41_reclaim_complete_args *args)
+static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args)
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_reclaim_complete(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_reclaim_complete(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
/*
* Encode GETDEVICEINFO request
*/
-static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
- struct nfs4_getdeviceinfo_args *args)
+static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_getdeviceinfo_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_getdeviceinfo(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_getdeviceinfo(xdr, args, &hdr);
/* set up reply kvec. Subtract notification bitmap max size (2)
* so that notification bitmap is put in xdr_buf tail */
@@ -2657,27 +2582,24 @@ static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
args->pdev->pglen);
encode_nops(&hdr);
- return 0;
}
/*
* Encode LAYOUTGET request
*/
-static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
- struct nfs4_layoutget_args *args)
+static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutget_args *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
- encode_layoutget(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutget(xdr, args, &hdr);
encode_nops(&hdr);
- return 0;
}
#endif /* CONFIG_NFS_V4_1 */
@@ -4475,7 +4397,7 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
goto out_overflow;
eof = be32_to_cpup(p++);
count = be32_to_cpup(p);
- hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+ hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
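
The one-line change in this hunk follows the stream's cursor instead of a stale local: with xdr_streams, xdr->p is the current decode position, so the header length (everything in the reply that precedes the READ payload) is measured from the head iovec up to that cursor. Illustrative arithmetic:

	hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;	/* e.g. 108 bytes of reply ops     */
	recvd  = req->rq_rcv_buf.len - hdrlen;		/* 4204 - 108 = 4096 payload bytes */
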
@@ -5000,7 +4922,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
goto out_overflow;
len = be32_to_cpup(p);
if (len) {
- int i;
+ uint32_t i;
p = xdr_inline_decode(xdr, 4 * len);
if (unlikely(!p))
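
In the hunk above, len is read off the wire via be32_to_cpup() and is therefore a full unsigned 32-bit value under server control; indexing it with a signed int draws a -Wsign-compare warning, and in the comparison i is converted to unsigned anyway. Declaring the index uint32_t matches the operand types, which is presumably all this hunk is after:

	u32 len = be32_to_cpup(p);	/* 0 .. 0xffffffff, server-controlled    */
	uint32_t i;			/* was: int i -- signed/unsigned compare */
	for (i = 0; i < len; i++)
		; /* decode one notification-bitmap word per pass */
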
@@ -5090,26 +5012,26 @@ out_overflow:
/*
* Decode OPEN_DOWNGRADE response
*/
-static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_closeres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_open_downgrade(&xdr, res);
+ status = decode_open_downgrade(xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
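
The decoders get the mirror-image treatment: xdr_init_decode() and the on-stack stream leave each function, but unlike the encoders they keep their int return, since a COMPOUND reply can fail at any operation and that status must propagate to the caller. The presumed receive-side counterpart, again only a sketch with illustrative names (the kxdrdproc_t method type is assumed here):

	/* Sketch: generic client sets up the decode stream once. */
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
	struct xdr_stream xdr;
	__be32 *p = rpc_verify_header(task);	/* past the RPC reply header */

	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
	status = decode(req, &xdr, task->tk_msg.rpc_resp);	/* e.g. nfs4_xdr_dec_access */
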
@@ -5118,26 +5040,25 @@ out:
/*
* Decode ACCESS response
*/
-static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res)
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_accessres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status != 0)
goto out;
- status = decode_access(&xdr, res);
+ status = decode_access(xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5146,26 +5067,28 @@ out:
/*
* Decode LOOKUP response
*/
-static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_lookup_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_lookup(&xdr)) != 0)
+ status = decode_lookup(xdr);
+ if (status)
goto out;
- if ((status = decode_getfh(&xdr, res->fh)) != 0)
+ status = decode_getfh(xdr, res->fh);
+ if (status)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server
+ status = decode_getfattr(xdr, res->fattr, res->server
,!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5174,23 +5097,25 @@ out:
/*
* Decode LOOKUP_ROOT response
*/
-static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_lookup_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putrootfh(&xdr)) != 0)
+ status = decode_putrootfh(xdr);
+ if (status)
goto out;
- if ((status = decode_getfh(&xdr, res->fh)) == 0)
- status = decode_getfattr(&xdr, res->fattr, res->server,
+ status = decode_getfh(xdr, res->fh);
+ if (status == 0)
+ status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5199,24 +5124,25 @@ out:
/*
* Decode REMOVE response
*/
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_removeres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+ status = decode_remove(xdr, &res->cinfo);
+ if (status)
goto out;
- decode_getfattr(&xdr, res->dir_attr, res->server,
+ decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5225,34 +5151,38 @@ out:
/*
* Decode RENAME response
*/
-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res)
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_renameres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_savefh(&xdr)) != 0)
+ status = decode_savefh(xdr);
+ if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+ status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
+ if (status)
goto out;
/* Current FH is target directory */
- if (decode_getfattr(&xdr, res->new_fattr, res->server,
+ if (decode_getfattr(xdr, res->new_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
- if ((status = decode_restorefh(&xdr)) != 0)
+ status = decode_restorefh(xdr);
+ if (status)
goto out;
- decode_getfattr(&xdr, res->old_fattr, res->server,
+ decode_getfattr(xdr, res->old_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5261,37 +5191,41 @@ out:
/*
* Decode LINK response
*/
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_link_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_savefh(&xdr)) != 0)
+ status = decode_savefh(xdr);
+ if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+ status = decode_link(xdr, &res->cinfo);
+ if (status)
goto out;
/*
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(&xdr, res->dir_attr, res->server,
+ if (decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
- if ((status = decode_restorefh(&xdr)) != 0)
+ status = decode_restorefh(xdr);
+ if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5300,33 +5234,37 @@ out:
/*
* Decode CREATE response
*/
-static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_create_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_savefh(&xdr)) != 0)
+ status = decode_savefh(xdr);
+ if (status)
goto out;
- if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
+ status = decode_create(xdr, &res->dir_cinfo);
+ if (status)
goto out;
- if ((status = decode_getfh(&xdr, res->fh)) != 0)
+ status = decode_getfh(xdr, res->fh);
+ if (status)
goto out;
- if (decode_getfattr(&xdr, res->fattr, res->server,
+ if (decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
- if ((status = decode_restorefh(&xdr)) != 0)
+ status = decode_restorefh(xdr);
+ if (status)
goto out;
- decode_getfattr(&xdr, res->dir_fattr, res->server,
+ decode_getfattr(xdr, res->dir_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5335,31 +5273,31 @@ out:
/*
* Decode SYMLINK response
*/
-static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_create_res *res)
{
- return nfs4_xdr_dec_create(rqstp, p, res);
+ return nfs4_xdr_dec_create(rqstp, xdr, res);
}
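
SYMLINK reuses CREATE's decoder outright because the two COMPOUND replies carry identical operations; only the wrapper's argument type changes. For context, a hypothetical sketch of how such encode/decode pairs are presumably registered in the rpc_procinfo table (the real PROC() macro lives elsewhere in this file and may differ):

	[NFSPROC4_CLNT_SYMLINK] = {
		.p_proc   = NFSPROC4_COMPOUND,
		.p_encode = (kxdreproc_t)nfs4_xdr_enc_symlink,
		.p_decode = (kxdrdproc_t)nfs4_xdr_dec_symlink,
	},
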
/*
* Decode GETATTR response
*/
-static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res)
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_getattr_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server,
+ status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5368,46 +5306,40 @@ out:
/*
 * Encode a SETACL request
*/
-static int
-nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args)
+static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_setaclargs *args)
{
- struct xdr_stream xdr;
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int status;
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, req, &hdr);
- encode_sequence(&xdr, &args->seq_args, &hdr);
- encode_putfh(&xdr, args->fh, &hdr);
- status = encode_setacl(&xdr, args, &hdr);
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setacl(xdr, args, &hdr);
encode_nops(&hdr);
- return status;
}
/*
* Decode SETACL response
*/
static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_setaclres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_setattr(&xdr);
+ status = decode_setattr(xdr);
out:
return status;
}
@@ -5416,24 +5348,22 @@ out:
* Decode GETACL response
*/
static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_getaclres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getacl(&xdr, rqstp, &res->acl_len);
+ status = decode_getacl(xdr, rqstp, &res->acl_len);
out:
return status;
@@ -5442,23 +5372,22 @@ out:
/*
* Decode CLOSE response
*/
-static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_closeres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_close(&xdr, res);
+ status = decode_close(xdr, res);
if (status != 0)
goto out;
/*
@@ -5467,7 +5396,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5476,36 +5405,35 @@ out:
/*
* Decode OPEN response
*/
-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_openres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_savefh(&xdr);
+ status = decode_savefh(xdr);
if (status)
goto out;
- status = decode_open(&xdr, res);
+ status = decode_open(xdr, res);
if (status)
goto out;
- if (decode_getfh(&xdr, &res->fh) != 0)
+ if (decode_getfh(xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(&xdr, res->f_attr, res->server,
+ if (decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
- if (decode_restorefh(&xdr) != 0)
+ if (decode_restorefh(xdr) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_attr, res->server,
+ decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5514,20 +5442,20 @@ out:
/*
* Decode OPEN_CONFIRM response
*/
-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res)
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_open_confirmres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_open_confirm(&xdr, res);
+ status = decode_open_confirm(xdr, res);
out:
return status;
}
@@ -5535,26 +5463,26 @@ out:
/*
 * Decode OPEN (no attributes) response
*/
-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_openres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_open(&xdr, res);
+ status = decode_open(xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->f_attr, res->server,
+ decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5563,26 +5491,26 @@ out:
/*
* Decode SETATTR response
*/
-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res)
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_setattrres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_setattr(&xdr);
+ status = decode_setattr(xdr);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5591,23 +5519,22 @@ out:
/*
* Decode LOCK response
*/
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_lock_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_lock(&xdr, res);
+ status = decode_lock(xdr, res);
out:
return status;
}
@@ -5615,23 +5542,22 @@ out:
/*
* Decode LOCKT response
*/
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_lockt_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_lockt(&xdr, res);
+ status = decode_lockt(xdr, res);
out:
return status;
}
@@ -5639,61 +5565,58 @@ out:
/*
* Decode LOCKU response
*/
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_locku_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_locku(&xdr, res);
+ status = decode_locku(xdr, res);
out:
return status;
}
-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr, void *dummy)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_release_lockowner(&xdr);
+ status = decode_release_lockowner(xdr);
return status;
}
/*
* Decode READLINK response
*/
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs4_readlink_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_readlink(&xdr, rqstp);
+ status = decode_readlink(xdr, rqstp);
out:
return status;
}
@@ -5701,23 +5624,22 @@ out:
/*
* Decode READDIR response
*/
-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res)
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_readdir_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_readdir(&xdr, rqstp, res);
+ status = decode_readdir(xdr, rqstp, res);
out:
return status;
}
@@ -5725,23 +5647,22 @@ out:
/*
* Decode Read response
*/
-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res)
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_readres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_read(&xdr, rqstp, res);
+ status = decode_read(xdr, rqstp, res);
if (!status)
status = res->count;
out:
@@ -5751,26 +5672,25 @@ out:
/*
* Decode WRITE response
*/
-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_writeres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_write(&xdr, res);
+ status = decode_write(xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
if (!status)
status = res->count;
@@ -5781,26 +5701,25 @@ out:
/*
* Decode COMMIT response
*/
-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_writeres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_commit(&xdr, res);
+ status = decode_commit(xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5809,85 +5728,80 @@ out:
/*
* Decode FSINFO response
*/
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_fsinfo_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, &res->seq_res, req);
+ status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (!status)
- status = decode_fsinfo(&xdr, res->fsinfo);
+ status = decode_fsinfo(xdr, res->fsinfo);
return status;
}
/*
* Decode PATHCONF response
*/
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_pathconf_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, &res->seq_res, req);
+ status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (!status)
- status = decode_pathconf(&xdr, res->pathconf);
+ status = decode_pathconf(xdr, res->pathconf);
return status;
}
/*
* Decode STATFS response
*/
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_statfs_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, &res->seq_res, req);
+ status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (!status)
- status = decode_statfs(&xdr, res->fsstat);
+ status = decode_statfs(xdr, res->fsstat);
return status;
}
/*
* Decode GETATTR_BITMAP response
*/
-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_server_caps_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, req);
+ status = decode_sequence(xdr, &res->seq_res, req);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- status = decode_server_caps(&xdr, res);
+ status = decode_server_caps(xdr, res);
out:
return status;
}
@@ -5895,79 +5809,77 @@ out:
/*
* Decode RENEW response
*/
-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *__unused)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_renew(&xdr);
+ status = decode_renew(xdr);
return status;
}
/*
* Decode SETCLIENTID response
*/
-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
- struct nfs4_setclientid_res *res)
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_setclientid(&xdr, res);
+ status = decode_setclientid(xdr, res);
return status;
}
/*
* Decode SETCLIENTID_CONFIRM response
*/
-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsinfo *fsinfo)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_setclientid_confirm(&xdr);
+ status = decode_setclientid_confirm(xdr);
if (!status)
- status = decode_putrootfh(&xdr);
+ status = decode_putrootfh(xdr);
if (!status)
- status = decode_fsinfo(&xdr, fsinfo);
+ status = decode_fsinfo(xdr, fsinfo);
return status;
}
/*
* Decode DELEGRETURN response
*/
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_delegreturnres *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status != 0)
goto out;
- status = decode_delegreturn(&xdr);
+ status = decode_delegreturn(xdr);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server,
+ decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5976,26 +5888,27 @@ out:
/*
* Decode FS_LOCATIONS response
*/
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
struct nfs4_fs_locations_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, req);
+ status = decode_sequence(xdr, &res->seq_res, req);
if (status)
goto out;
- if ((status = decode_putfh(&xdr)) != 0)
+ status = decode_putfh(xdr);
+ if (status)
goto out;
- if ((status = decode_lookup(&xdr)) != 0)
+ status = decode_lookup(xdr);
+ if (status)
goto out;
- xdr_enter_page(&xdr, PAGE_SIZE);
- status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+ xdr_enter_page(xdr, PAGE_SIZE);
+ status = decode_getfattr(xdr, &res->fs_locations->fattr,
res->fs_locations->server,
!RPC_IS_ASYNC(req->rq_task));
out:
@@ -6006,129 +5919,122 @@ out:
/*
* Decode EXCHANGE_ID response
*/
-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
void *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_exchange_id(&xdr, res);
+ status = decode_exchange_id(xdr, res);
return status;
}
/*
* Decode CREATE_SESSION response
*/
-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs41_create_session_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_create_session(&xdr, res);
+ status = decode_create_session(xdr, res);
return status;
}
/*
* Decode DESTROY_SESSION response
*/
-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
- void *dummy)
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_destroy_session(&xdr, dummy);
+ status = decode_destroy_session(xdr, res);
return status;
}
/*
* Decode SEQUENCE response
*/
-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs4_sequence_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, res, rqstp);
+ status = decode_sequence(xdr, res, rqstp);
return status;
}
/*
* Decode GET_LEASE_TIME response
*/
-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs4_get_lease_time_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+ status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
if (!status)
- status = decode_putrootfh(&xdr);
+ status = decode_putrootfh(xdr);
if (!status)
- status = decode_fsinfo(&xdr, res->lr_fsinfo);
+ status = decode_fsinfo(xdr, res->lr_fsinfo);
return status;
}
/*
* Decode RECLAIM_COMPLETE response
*/
-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs41_reclaim_complete_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (!status)
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (!status)
- status = decode_reclaim_complete(&xdr, (void *)NULL);
+ status = decode_reclaim_complete(xdr, (void *)NULL);
return status;
}
/*
* Decode GETDEVINFO response
*/
-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs4_getdeviceinfo_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status != 0)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status != 0)
goto out;
- status = decode_getdeviceinfo(&xdr, res->pdev);
+ status = decode_getdeviceinfo(xdr, res->pdev);
out:
return status;
}
@@ -6136,31 +6042,44 @@ out:
/*
* Decode LAYOUTGET response
*/
-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
struct nfs4_layoutget_res *res)
{
- struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_compound_hdr(&xdr, &hdr);
+ status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
- status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
- status = decode_putfh(&xdr);
+ status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_layoutget(&xdr, rqstp, res);
+ status = decode_layoutget(xdr, rqstp, res);
out:
return status;
}
#endif /* CONFIG_NFS_V4_1 */
-__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
- struct nfs_server *server, int plus)
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ * the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
{
uint32_t bitmap[2] = {0};
uint32_t len;
@@ -6172,9 +6091,9 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (unlikely(!p))
goto out_overflow;
if (!ntohl(*p++))
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
entry->eof = 1;
- return ERR_PTR(-EBADCOOKIE);
+ return -EBADCOOKIE;
}
p = xdr_inline_decode(xdr, 12);
@@ -6203,7 +6122,8 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (decode_attr_length(xdr, &len, &p) < 0)
goto out_overflow;
- if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+ if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+ entry->server, 1) < 0)
goto out_overflow;
if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
entry->ino = entry->fattr->fileid;
@@ -6215,17 +6135,11 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (verify_attr_len(xdr, p, len) < 0)
goto out_overflow;
- p = xdr_inline_peek(xdr, 8);
- if (p != NULL)
- entry->eof = !p[0] && p[1];
- else
- entry->eof = 0;
-
- return p;
+ return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
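
The hunk above converts nfs4_decode_dirent() from returning an ERR_PTR-encoded pointer to returning a plain int errno. A minimal sketch of the two conventions (hypothetical helpers, not from this patch):

    #include <linux/err.h>

    /* Old convention: the errno is smuggled inside the pointer value. */
    static __be32 *decode_old_style(void)
    {
        return ERR_PTR(-EAGAIN);   /* caller tests IS_ERR(), unpacks with PTR_ERR() */
    }

    /* New convention: a plain int, negative errno on failure. */
    static int decode_new_style(void)
    {
        return -EAGAIN;            /* caller simply tests status < 0 */
    }
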
/*
@@ -6301,8 +6215,8 @@ nfs4_stat_to_errno(int stat)
#define PROC(proc, argtype, restype) \
[NFSPROC4_CLNT_##proc] = { \
.p_proc = NFSPROC4_COMPOUND, \
- .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
- .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
+ .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \
+ .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \
.p_arglen = NFS4_##argtype##_sz, \
.p_replen = NFS4_##restype##_sz, \
.p_statidx = NFSPROC4_CLNT_##proc, \
@@ -6310,50 +6224,50 @@ nfs4_stat_to_errno(int stat)
}
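
The PROC() macro now casts through split encode/decode typedefs instead of the old catch-all kxdrproc_t. Roughly, and hedged (a sketch of the sunrpc typedefs of this era; the exact parameter types may differ):

    /* Old: one typedef for both directions, taking a raw buffer pointer. */
    typedef int  (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);

    /* New: encoders return nothing, decoders return a status, and both
     * receive an already-initialized xdr_stream. */
    typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
    typedef int  (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
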
struct rpc_procinfo nfs4_procedures[] = {
- PROC(READ, enc_read, dec_read),
- PROC(WRITE, enc_write, dec_write),
- PROC(COMMIT, enc_commit, dec_commit),
- PROC(OPEN, enc_open, dec_open),
- PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
- PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
- PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
- PROC(CLOSE, enc_close, dec_close),
- PROC(SETATTR, enc_setattr, dec_setattr),
- PROC(FSINFO, enc_fsinfo, dec_fsinfo),
- PROC(RENEW, enc_renew, dec_renew),
- PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
- PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
- PROC(LOCK, enc_lock, dec_lock),
- PROC(LOCKT, enc_lockt, dec_lockt),
- PROC(LOCKU, enc_locku, dec_locku),
- PROC(ACCESS, enc_access, dec_access),
- PROC(GETATTR, enc_getattr, dec_getattr),
- PROC(LOOKUP, enc_lookup, dec_lookup),
- PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
- PROC(REMOVE, enc_remove, dec_remove),
- PROC(RENAME, enc_rename, dec_rename),
- PROC(LINK, enc_link, dec_link),
- PROC(SYMLINK, enc_symlink, dec_symlink),
- PROC(CREATE, enc_create, dec_create),
- PROC(PATHCONF, enc_pathconf, dec_pathconf),
- PROC(STATFS, enc_statfs, dec_statfs),
- PROC(READLINK, enc_readlink, dec_readlink),
- PROC(READDIR, enc_readdir, dec_readdir),
- PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
- PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
- PROC(GETACL, enc_getacl, dec_getacl),
- PROC(SETACL, enc_setacl, dec_setacl),
- PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
- PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+ PROC(READ, enc_read, dec_read),
+ PROC(WRITE, enc_write, dec_write),
+ PROC(COMMIT, enc_commit, dec_commit),
+ PROC(OPEN, enc_open, dec_open),
+ PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
+ PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
+ PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
+ PROC(CLOSE, enc_close, dec_close),
+ PROC(SETATTR, enc_setattr, dec_setattr),
+ PROC(FSINFO, enc_fsinfo, dec_fsinfo),
+ PROC(RENEW, enc_renew, dec_renew),
+ PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
+ PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
+ PROC(LOCK, enc_lock, dec_lock),
+ PROC(LOCKT, enc_lockt, dec_lockt),
+ PROC(LOCKU, enc_locku, dec_locku),
+ PROC(ACCESS, enc_access, dec_access),
+ PROC(GETATTR, enc_getattr, dec_getattr),
+ PROC(LOOKUP, enc_lookup, dec_lookup),
+ PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
+ PROC(REMOVE, enc_remove, dec_remove),
+ PROC(RENAME, enc_rename, dec_rename),
+ PROC(LINK, enc_link, dec_link),
+ PROC(SYMLINK, enc_symlink, dec_symlink),
+ PROC(CREATE, enc_create, dec_create),
+ PROC(PATHCONF, enc_pathconf, dec_pathconf),
+ PROC(STATFS, enc_statfs, dec_statfs),
+ PROC(READLINK, enc_readlink, dec_readlink),
+ PROC(READDIR, enc_readdir, dec_readdir),
+ PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
+ PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
+ PROC(GETACL, enc_getacl, dec_getacl),
+ PROC(SETACL, enc_setacl, dec_setacl),
+ PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+ PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
#if defined(CONFIG_NFS_V4_1)
- PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
- PROC(CREATE_SESSION, enc_create_session, dec_create_session),
- PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
- PROC(SEQUENCE, enc_sequence, dec_sequence),
- PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
- PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
- PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
- PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC(SEQUENCE, enc_sequence, dec_sequence),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
+ PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b68536cc904..e1164e3f9e6 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,12 +26,9 @@ static struct kmem_cache *nfs_page_cachep;
static inline struct nfs_page *
nfs_page_alloc(void)
{
- struct nfs_page *p;
- p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
- if (p) {
- memset(p, 0, sizeof(*p));
+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+ if (p)
INIT_LIST_HEAD(&p->wb_list);
- }
return p;
}
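
The pagelist hunk above folds the explicit memset() into the allocator. The equivalence, sketched with the standard slab API:

    /* Before: allocate, then clear by hand. */
    p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
    if (p)
        memset(p, 0, sizeof(*p));

    /* After: one call; the allocator applies __GFP_ZERO internally. */
    p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
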
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index db773428f95..bc408976973 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,105 +177,149 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
* pNFS client layout cache
*/
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ atomic_inc(&lo->plh_refcount);
+}
+
static void
-get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
- assert_spin_locked(&lo->inode->i_lock);
- lo->refcount++;
+ dprintk("%s: freeing layout cache %p\n", __func__, lo);
+ BUG_ON(!list_empty(&lo->plh_layouts));
+ NFS_I(lo->plh_inode)->layout = NULL;
+ kfree(lo);
}
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
- assert_spin_locked(&lo->inode->i_lock);
- BUG_ON(lo->refcount == 0);
-
- lo->refcount--;
- if (!lo->refcount) {
- dprintk("%s: freeing layout cache %p\n", __func__, lo);
- BUG_ON(!list_empty(&lo->layouts));
- NFS_I(lo->inode)->layout = NULL;
- kfree(lo);
- }
+ if (atomic_dec_and_test(&lo->plh_refcount))
+ destroy_layout_hdr(lo);
}
void
-put_layout_hdr(struct inode *inode)
+put_layout_hdr(struct pnfs_layout_hdr *lo)
{
- spin_lock(&inode->i_lock);
- put_layout_hdr_locked(NFS_I(inode)->layout);
- spin_unlock(&inode->i_lock);
+ struct inode *inode = lo->plh_inode;
+
+ if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+ destroy_layout_hdr(lo);
+ spin_unlock(&inode->i_lock);
+ }
}
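
put_layout_hdr() above uses atomic_dec_and_lock(), which acquires the spinlock only when the counter actually drops to zero, so the common put path stays lock-free. The general shape of the idiom (generic sketch; obj and parent are hypothetical names):

    if (atomic_dec_and_lock(&obj->refcount, &parent->lock)) {
        /* Count hit zero and parent->lock is now held: safe to
         * unlink obj while holding the lock, then free it. */
        list_del(&obj->node);
        spin_unlock(&parent->lock);
        kfree(obj);
    }
    /* Otherwise the count is still nonzero and no lock was taken. */
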
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
- INIT_LIST_HEAD(&lseg->fi_list);
- kref_init(&lseg->kref);
- lseg->layout = lo;
+ INIT_LIST_HEAD(&lseg->pls_list);
+ atomic_set(&lseg->pls_refcount, 1);
+ smp_mb();
+ set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+ lseg->pls_layout = lo;
}
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
{
- struct pnfs_layout_segment *lseg =
- container_of(kref, struct pnfs_layout_segment, kref);
- struct inode *ino = lseg->layout->inode;
+ struct inode *ino = lseg->pls_layout->plh_inode;
- dprintk("--> %s\n", __func__);
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
- /* Matched by get_layout_hdr_locked in pnfs_insert_layout */
- put_layout_hdr(ino);
+ /* Matched by get_layout_hdr in pnfs_insert_layout */
+ put_layout_hdr(NFS_I(ino)->layout);
}
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so it must be called outside of the lock.
+ * Returns 1 if the object was removed, otherwise returns 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+ struct list_head *tmp_list)
+{
+ dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+ atomic_read(&lseg->pls_refcount),
+ test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ if (atomic_dec_and_test(&lseg->pls_refcount)) {
+ struct inode *ino = lseg->pls_layout->plh_inode;
+
+ BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ list_del(&lseg->pls_list);
+ if (list_empty(&lseg->pls_layout->plh_segs)) {
+ struct nfs_client *clp;
+
+ clp = NFS_SERVER(ino)->nfs_client;
+ spin_lock(&clp->cl_lock);
+ /* List does not take a reference, so no need for put here */
+ list_del_init(&lseg->pls_layout->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
+ }
+ rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+ list_add(&lseg->pls_list, tmp_list);
+ return 1;
+ }
+ return 0;
+}
+
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
- if (!lseg)
- return;
+ return (recall_iomode == IOMODE_ANY ||
+ lseg_iomode == recall_iomode);
+}
- dprintk("%s: lseg %p ref %d\n", __func__, lseg,
- atomic_read(&lseg->kref.refcount));
- kref_put(&lseg->kref, destroy_lseg);
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+ struct list_head *tmp_list)
+{
+ int rv = 0;
+
+ if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+ /* Remove the reference keeping the lseg in the
+ * list. It will now be removed when all
+		 * outstanding I/O is finished.
+ */
+ rv = put_lseg_locked(lseg, tmp_list);
+ }
+ return rv;
}
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+/* Returns the number of matching invalid lsegs remaining in the list
+ * after the call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ u32 iomode)
{
struct pnfs_layout_segment *lseg, *next;
- struct nfs_client *clp;
+ int invalid = 0, removed = 0;
dprintk("%s:Begin lo %p\n", __func__, lo);
- assert_spin_locked(&lo->inode->i_lock);
- list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
- dprintk("%s: freeing lseg %p\n", __func__, lseg);
- list_move(&lseg->fi_list, tmp_list);
- }
- clp = NFS_SERVER(lo->inode)->nfs_client;
- spin_lock(&clp->cl_lock);
- /* List does not take a reference, so no need for put here */
- list_del_init(&lo->layouts);
- spin_unlock(&clp->cl_lock);
- write_seqlock(&lo->seqlock);
- clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
- write_sequnlock(&lo->seqlock);
-
- dprintk("%s:Return\n", __func__);
+ list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+ dprintk("%s: freeing lseg %p iomode %d "
+ "offset %llu length %llu\n", __func__,
+ lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+ lseg->pls_range.length);
+ invalid++;
+ removed += mark_lseg_invalid(lseg, tmp_list);
+ }
+ dprintk("%s:Return %i\n", __func__, invalid - removed);
+ return invalid - removed;
}
-static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+void
+pnfs_free_lseg_list(struct list_head *free_me)
{
- struct pnfs_layout_segment *lseg;
+ struct pnfs_layout_segment *lseg, *tmp;
- while (!list_empty(tmp_list)) {
- lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
- fi_list);
- dprintk("%s calling put_lseg on %p\n", __func__, lseg);
- list_del(&lseg->fi_list);
- put_lseg(lseg);
+ list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+ list_del(&lseg->pls_list);
+ free_lseg(lseg);
}
}
@@ -288,7 +332,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
spin_lock(&nfsi->vfs_inode.i_lock);
lo = nfsi->layout;
if (lo) {
- pnfs_clear_lseg_list(lo, &tmp_list);
+ set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+ mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
/* Matched by refcount set to 1 in alloc_init_layout_hdr */
put_layout_hdr_locked(lo);
}
@@ -312,76 +357,80 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
while (!list_empty(&tmp_list)) {
lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
- layouts);
+ plh_layouts);
dprintk("%s freeing layout for inode %lu\n", __func__,
- lo->inode->i_ino);
- pnfs_destroy_layout(NFS_I(lo->inode));
+ lo->plh_inode->i_ino);
+ pnfs_destroy_layout(NFS_I(lo->plh_inode));
}
}
-/* update lo->stateid with new if is more recent
- *
- * lo->stateid could be the open stateid, in which case we just use what given.
- */
-static void
-pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
- const nfs4_stateid *new)
-{
- nfs4_stateid *old = &lo->stateid;
- bool overwrite = false;
-
- write_seqlock(&lo->seqlock);
- if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
- memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
- overwrite = true;
- else {
- u32 oldseq, newseq;
-
- oldseq = be32_to_cpu(old->stateid.seqid);
- newseq = be32_to_cpu(new->stateid.seqid);
- if ((int)(newseq - oldseq) > 0)
- overwrite = true;
+/* update lo->plh_stateid with new if is more recent */
+void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+ bool update_barrier)
+{
+ u32 oldseq, newseq;
+
+ oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+ newseq = be32_to_cpu(new->stateid.seqid);
+ if ((int)(newseq - oldseq) > 0) {
+ memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
+ if (update_barrier) {
+ u32 new_barrier = be32_to_cpu(new->stateid.seqid);
+
+			if ((int)(new_barrier - lo->plh_barrier) > 0)
+ lo->plh_barrier = new_barrier;
+ } else {
+ /* Because of wraparound, we want to keep the barrier
+ * "close" to the current seqids. It needs to be
+ * within 2**31 to count as "behind", so if it
+			 * gets too near that limit, give ourselves a little leeway
+ * and bring it to within 2**30.
+ * NOTE - and yes, this is all unsigned arithmetic.
+ */
+ if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+ lo->plh_barrier = newseq - (1 << 30);
+ }
}
- if (overwrite)
- memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
- write_sequnlock(&lo->seqlock);
}
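
The seqid and barrier tests above use serial-number arithmetic in the RFC 1982 style: cast the unsigned difference to signed, so anything less than 2**31 ahead counts as newer even across wraparound. A standalone demonstration in plain userspace C:

    #include <stdint.h>
    #include <stdio.h>

    static int seqid_newer(uint32_t newseq, uint32_t oldseq)
    {
        return (int32_t)(newseq - oldseq) > 0;  /* signed view of the gap */
    }

    int main(void)
    {
        printf("%d\n", seqid_newer(5, 3));            /* 1: ordinary case */
        printf("%d\n", seqid_newer(3, 5));            /* 0: older */
        printf("%d\n", seqid_newer(2, 0xfffffffeu));  /* 1: newer despite wrap */
        return 0;
    }
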
-static void
-pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
- struct nfs4_state *state)
+/* lget is set to 1 if called from inside the send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+ int lget)
{
- int seq;
-
- dprintk("--> %s\n", __func__);
- write_seqlock(&lo->seqlock);
- do {
- seq = read_seqbegin(&state->seqlock);
- memcpy(lo->stateid.data, state->stateid.data,
- sizeof(state->stateid.data));
- } while (read_seqretry(&state->seqlock, seq));
- set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
- write_sequnlock(&lo->seqlock);
- dprintk("<-- %s\n", __func__);
+ if ((stateid) &&
+ (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
+ return true;
+ return lo->plh_block_lgets ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+ (list_empty(&lo->plh_segs) &&
+ (atomic_read(&lo->plh_outstanding) > lget));
}
-void
-pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
- struct nfs4_state *open_state)
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state)
{
- int seq;
+ int status = 0;
dprintk("--> %s\n", __func__);
- do {
- seq = read_seqbegin(&lo->seqlock);
- if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
- /* This will trigger retry of the read */
- pnfs_layout_from_open_stateid(lo, open_state);
- } else
- memcpy(dst->data, lo->stateid.data,
- sizeof(lo->stateid.data));
- } while (read_seqretry(&lo->seqlock, seq));
+ spin_lock(&lo->plh_inode->i_lock);
+ if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+ status = -EAGAIN;
+ } else if (list_empty(&lo->plh_segs)) {
+ int seq;
+
+ do {
+ seq = read_seqbegin(&open_state->seqlock);
+ memcpy(dst->data, open_state->stateid.data,
+ sizeof(open_state->stateid.data));
+ } while (read_seqretry(&open_state->seqlock, seq));
+ } else
+ memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
+ spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
+ return status;
}
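
pnfs_choose_layoutget_stateid() above copies the open stateid inside a seqlock read loop: read_seqbegin() samples the writer generation, and read_seqretry() reports whether a writer raced with the copy, in which case the copy is simply repeated. The bare pattern (generic sketch; obj and copy are hypothetical):

    unsigned int seq;

    do {
        seq = read_seqbegin(&obj->seqlock);      /* sample writer generation */
        memcpy(copy, obj->data, sizeof(copy));   /* unlocked, possibly torn read */
    } while (read_seqretry(&obj->seqlock, seq)); /* retry if a writer intervened */
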
/*
@@ -395,7 +444,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_open_context *ctx,
u32 iomode)
{
- struct inode *ino = lo->inode;
+ struct inode *ino = lo->plh_inode;
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
struct pnfs_layout_segment *lseg = NULL;
@@ -404,10 +453,8 @@ send_layoutget(struct pnfs_layout_hdr *lo,
BUG_ON(ctx == NULL);
lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
- if (lgp == NULL) {
- put_layout_hdr(lo->inode);
+ if (lgp == NULL)
return NULL;
- }
lgp->args.minlength = NFS4_MAX_UINT64;
lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
lgp->args.range.iomode = iomode;
@@ -424,11 +471,88 @@ send_layoutget(struct pnfs_layout_hdr *lo,
nfs4_proc_layoutget(lgp);
if (!lseg) {
/* remember that LAYOUTGET failed and suspend trying */
- set_bit(lo_fail_bit(iomode), &lo->state);
+ set_bit(lo_fail_bit(iomode), &lo->plh_flags);
}
return lseg;
}
+bool pnfs_roc(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_segment *lseg, *tmp;
+ LIST_HEAD(tmp_list);
+ bool found = false;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+ goto out_nolayout;
+ list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+ if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+ mark_lseg_invalid(lseg, &tmp_list);
+ found = true;
+ }
+ if (!found)
+ goto out_nolayout;
+ lo->plh_block_lgets++;
+ get_layout_hdr(lo); /* matched in pnfs_roc_release */
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+ return true;
+
+out_nolayout:
+ spin_unlock(&ino->i_lock);
+ return false;
+}
+
+void pnfs_roc_release(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ lo->plh_block_lgets--;
+ put_layout_hdr_locked(lo);
+ spin_unlock(&ino->i_lock);
+}
+
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+ struct pnfs_layout_hdr *lo;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ if ((int)(barrier - lo->plh_barrier) > 0)
+ lo->plh_barrier = barrier;
+ spin_unlock(&ino->i_lock);
+}
+
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_segment *lseg;
+ bool found = false;
+
+ spin_lock(&ino->i_lock);
+ list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
+ if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+ found = true;
+ break;
+ }
+ if (!found) {
+ struct pnfs_layout_hdr *lo = nfsi->layout;
+ u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+
+ /* Since close does not return a layout stateid for use as
+ * a barrier, we choose the worst-case barrier.
+ */
+ *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+ }
+ spin_unlock(&ino->i_lock);
+ return found;
+}
+
/*
* Compare two layout segments for sorting into layout cache.
* We want to preferentially return RW over RO layouts, so ensure those
@@ -450,37 +574,29 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin\n", __func__);
- assert_spin_locked(&lo->inode->i_lock);
- if (list_empty(&lo->segs)) {
- struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
-
- spin_lock(&clp->cl_lock);
- BUG_ON(!list_empty(&lo->layouts));
- list_add_tail(&lo->layouts, &clp->cl_layouts);
- spin_unlock(&clp->cl_lock);
- }
- list_for_each_entry(lp, &lo->segs, fi_list) {
- if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+ assert_spin_locked(&lo->plh_inode->i_lock);
+ list_for_each_entry(lp, &lo->plh_segs, pls_list) {
+ if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
continue;
- list_add_tail(&lseg->fi_list, &lp->fi_list);
+ list_add_tail(&lseg->pls_list, &lp->pls_list);
dprintk("%s: inserted lseg %p "
"iomode %d offset %llu length %llu before "
"lp %p iomode %d offset %llu length %llu\n",
- __func__, lseg, lseg->range.iomode,
- lseg->range.offset, lseg->range.length,
- lp, lp->range.iomode, lp->range.offset,
- lp->range.length);
+ __func__, lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset, lseg->pls_range.length,
+ lp, lp->pls_range.iomode, lp->pls_range.offset,
+ lp->pls_range.length);
found = 1;
break;
}
if (!found) {
- list_add_tail(&lseg->fi_list, &lo->segs);
+ list_add_tail(&lseg->pls_list, &lo->plh_segs);
dprintk("%s: inserted lseg %p "
"iomode %d offset %llu length %llu at tail\n",
- __func__, lseg, lseg->range.iomode,
- lseg->range.offset, lseg->range.length);
+ __func__, lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset, lseg->pls_range.length);
}
- get_layout_hdr_locked(lo);
+ get_layout_hdr(lo);
dprintk("%s:Return\n", __func__);
}
@@ -493,11 +609,11 @@ alloc_init_layout_hdr(struct inode *ino)
lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
if (!lo)
return NULL;
- lo->refcount = 1;
- INIT_LIST_HEAD(&lo->layouts);
- INIT_LIST_HEAD(&lo->segs);
- seqlock_init(&lo->seqlock);
- lo->inode = ino;
+ atomic_set(&lo->plh_refcount, 1);
+ INIT_LIST_HEAD(&lo->plh_layouts);
+ INIT_LIST_HEAD(&lo->plh_segs);
+ INIT_LIST_HEAD(&lo->plh_bulk_recall);
+ lo->plh_inode = ino;
return lo;
}
@@ -510,9 +626,12 @@ pnfs_find_alloc_layout(struct inode *ino)
dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
assert_spin_locked(&ino->i_lock);
- if (nfsi->layout)
- return nfsi->layout;
-
+ if (nfsi->layout) {
+ if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+ return NULL;
+ else
+ return nfsi->layout;
+ }
spin_unlock(&ino->i_lock);
new = alloc_init_layout_hdr(ino);
spin_lock(&ino->i_lock);
@@ -538,31 +657,32 @@ pnfs_find_alloc_layout(struct inode *ino)
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
- return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+ return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}
/*
* lookup range in layout
*/
static struct pnfs_layout_segment *
-pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
struct pnfs_layout_segment *lseg, *ret = NULL;
dprintk("%s:Begin\n", __func__);
- assert_spin_locked(&lo->inode->i_lock);
- list_for_each_entry(lseg, &lo->segs, fi_list) {
- if (is_matching_lseg(lseg, iomode)) {
+ assert_spin_locked(&lo->plh_inode->i_lock);
+ list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+ if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+ is_matching_lseg(lseg, iomode)) {
ret = lseg;
break;
}
- if (cmp_layout(iomode, lseg->range.iomode) > 0)
+ if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
break;
}
dprintk("%s:Return lseg %p ref %d\n",
- __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+ __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
return ret;
}
@@ -576,6 +696,7 @@ pnfs_update_layout(struct inode *ino,
enum pnfs_iomode iomode)
{
struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg = NULL;
@@ -588,25 +709,53 @@ pnfs_update_layout(struct inode *ino,
goto out_unlock;
}
- /* Check to see if the layout for the given range already exists */
- lseg = pnfs_has_layout(lo, iomode);
- if (lseg) {
- dprintk("%s: Using cached lseg %p for iomode %d)\n",
- __func__, lseg, iomode);
+ /* Do we even need to bother with this? */
+ if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+ dprintk("%s matches recall, use MDS\n", __func__);
goto out_unlock;
}
+ /* Check to see if the layout for the given range already exists */
+ lseg = pnfs_find_lseg(lo, iomode);
+ if (lseg)
+ goto out_unlock;
/* if LAYOUTGET already failed once we don't try again */
- if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+ if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+ goto out_unlock;
+
+ if (pnfs_layoutgets_blocked(lo, NULL, 0))
goto out_unlock;
+ atomic_inc(&lo->plh_outstanding);
- get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+ get_layout_hdr(lo);
+ if (list_empty(&lo->plh_segs)) {
+ /* The lo must be on the clp list if there is any
+ * chance of a CB_LAYOUTRECALL(FILE) coming in.
+ */
+ spin_lock(&clp->cl_lock);
+ BUG_ON(!list_empty(&lo->plh_layouts));
+ list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
spin_unlock(&ino->i_lock);
lseg = send_layoutget(lo, ctx, iomode);
+ if (!lseg) {
+ spin_lock(&ino->i_lock);
+ if (list_empty(&lo->plh_segs)) {
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+ }
+ spin_unlock(&ino->i_lock);
+ }
+ atomic_dec(&lo->plh_outstanding);
+ put_layout_hdr(lo);
out:
dprintk("%s end, state 0x%lx lseg %p\n", __func__,
- nfsi->layout->state, lseg);
+ nfsi->layout->plh_flags, lseg);
return lseg;
out_unlock:
spin_unlock(&ino->i_lock);
@@ -619,9 +768,21 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
struct nfs4_layoutget_res *res = &lgp->res;
struct pnfs_layout_segment *lseg;
- struct inode *ino = lo->inode;
+ struct inode *ino = lo->plh_inode;
+ struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
int status = 0;
+ /* Verify we got what we asked for.
+ * Note that because the xdr parsing only accepts a single
+ * element array, this can fail even if the server is behaving
+ * correctly.
+ */
+ if (lgp->args.range.iomode > res->range.iomode ||
+ res->range.offset != 0 ||
+ res->range.length != NFS4_MAX_UINT64) {
+ status = -EINVAL;
+ goto out;
+ }
/* Inject layout blob into I/O device driver */
lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
if (!lseg || IS_ERR(lseg)) {
@@ -635,16 +796,37 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
}
spin_lock(&ino->i_lock);
+ if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+ dprintk("%s forget reply due to recall\n", __func__);
+ goto out_forget_reply;
+ }
+
+ if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+ dprintk("%s forget reply due to state\n", __func__);
+ goto out_forget_reply;
+ }
init_lseg(lo, lseg);
- lseg->range = res->range;
+ lseg->pls_range = res->range;
*lgp->lsegpp = lseg;
pnfs_insert_layout(lo, lseg);
+ if (res->return_on_close) {
+ set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+ set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
+ }
+
/* Done processing layoutget. Set the layout stateid */
- pnfs_set_layout_stateid(lo, &res->stateid);
+ pnfs_set_layout_stateid(lo, &res->stateid, false);
spin_unlock(&ino->i_lock);
out:
return status;
+
+out_forget_reply:
+ spin_unlock(&ino->i_lock);
+ lseg->pls_layout = lo;
+ NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ goto out;
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e12367d5048..e2612ea0cbe 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,11 +30,17 @@
#ifndef FS_NFS_PNFS_H
#define FS_NFS_PNFS_H
+enum {
+ NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
+ NFS_LSEG_ROC, /* roc bit received from server */
+};
+
struct pnfs_layout_segment {
- struct list_head fi_list;
- struct pnfs_layout_range range;
- struct kref kref;
- struct pnfs_layout_hdr *layout;
+ struct list_head pls_list;
+ struct pnfs_layout_range pls_range;
+ atomic_t pls_refcount;
+ unsigned long pls_flags;
+ struct pnfs_layout_hdr *pls_layout;
};
#ifdef CONFIG_NFS_V4_1
@@ -44,7 +50,9 @@ struct pnfs_layout_segment {
enum {
NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
- NFS_LAYOUT_STATEID_SET, /* have a valid layout stateid */
+ NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
+ NFS_LAYOUT_ROC, /* some lseg had roc bit set */
+ NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
};
/* Per-layout driver specific registration structure */
@@ -60,13 +68,16 @@ struct pnfs_layoutdriver_type {
};
struct pnfs_layout_hdr {
- unsigned long refcount;
- struct list_head layouts; /* other client layouts */
- struct list_head segs; /* layout segments list */
- seqlock_t seqlock; /* Protects the stateid */
- nfs4_stateid stateid;
- unsigned long state;
- struct inode *inode;
+ atomic_t plh_refcount;
+ struct list_head plh_layouts; /* other client layouts */
+ struct list_head plh_bulk_recall; /* clnt list of bulk recalls */
+ struct list_head plh_segs; /* layout segments list */
+ nfs4_stateid plh_stateid;
+ atomic_t plh_outstanding; /* number of RPCs out */
+ unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */
+ u32 plh_barrier; /* ignore lower seqids */
+ unsigned long plh_flags;
+ struct inode *plh_inode;
};
struct pnfs_device {
@@ -134,17 +145,30 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
/* pnfs.c */
+void get_layout_hdr(struct pnfs_layout_hdr *lo);
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
enum pnfs_iomode access_type);
void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
void unset_pnfs_layoutdriver(struct nfs_server *);
int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
-void put_layout_hdr(struct inode *inode);
-void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
- struct nfs4_state *open_state);
+void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *new,
+ bool update_barrier);
+int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
+ struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state);
+int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ u32 iomode);
+bool pnfs_roc(struct inode *ino);
+void pnfs_roc_release(struct inode *ino);
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
static inline int lo_fail_bit(u32 iomode)
@@ -176,6 +200,28 @@ pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
return NULL;
}
+static inline bool
+pnfs_roc(struct inode *ino)
+{
+ return false;
+}
+
+static inline void
+pnfs_roc_release(struct inode *ino)
+{
+}
+
+static inline void
+pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+}
+
+static inline bool
+pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+ return false;
+}
+
static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
{
}
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 58e7f84fc1f..77d5e21c4ad 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -458,7 +458,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
fattr = nfs_alloc_fattr();
status = -ENOMEM;
if (fh == NULL || fattr == NULL)
- goto out;
+ goto out_free;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
@@ -471,6 +471,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
if (status == 0)
status = nfs_instantiate(dentry, fh, fattr);
+out_free:
nfs_free_fattr(fattr);
nfs_free_fhandle(fh);
out:
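
The relabeled jump above fixes a leak: when exactly one of the two allocations succeeds, the old "goto out" skipped both frees. The cleanup-ladder idiom works because nfs_free_fattr() and nfs_free_fhandle(), like kfree(), are NULL-safe. A self-contained userspace rendering:

    #include <stdlib.h>

    int two_allocs(void)
    {
        int status = -1;            /* stand-in for -ENOMEM */
        char *a = malloc(8);
        char *b = malloc(8);

        if (a == NULL || b == NULL)
            goto out_free;          /* still free whichever one succeeded */
        status = 0;
        /* ... use a and b ... */
    out_free:
        free(a);                    /* free(NULL) is a no-op */
        free(b);
        return status;
    }
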
@@ -731,7 +732,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.statfs = nfs_proc_statfs,
.fsinfo = nfs_proc_fsinfo,
.pathconf = nfs_proc_pathconf,
- .decode_dirent = nfs_decode_dirent,
+ .decode_dirent = nfs2_decode_dirent,
.read_setup = nfs_proc_read_setup,
.read_done = nfs_read_done,
.write_setup = nfs_proc_write_setup,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4100630c9a5..0f9ea73e778 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -598,7 +598,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
if (nfss->mountd_version || showdefaults)
seq_printf(m, ",mountvers=%u", nfss->mountd_version);
- if (nfss->mountd_port || showdefaults)
+ if ((nfss->mountd_port &&
+ nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
+ showdefaults)
seq_printf(m, ",mountport=%u", nfss->mountd_port);
nfs_show_mountd_netid(m, nfss, showdefaults);
@@ -2494,7 +2496,13 @@ static void nfs4_clone_super(struct super_block *sb,
sb->s_maxbytes = old_sb->s_maxbytes;
sb->s_time_gran = 1;
sb->s_op = old_sb->s_op;
- nfs_initialise_sb(sb);
+ /*
+ * The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_xattr = old_sb->s_xattr;
+ nfs_initialise_sb(sb);
}
/*
@@ -2504,6 +2512,12 @@ static void nfs4_fill_super(struct super_block *sb)
{
sb->s_time_gran = 1;
sb->s_op = &nfs4_sops;
+ /*
+ * The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_xattr = nfs4_xattr_handlers;
nfs_initialise_sb(sb);
}
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 8fe9eb47a97..e313a51acdd 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -429,7 +429,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return ERR_PTR(-ENOMEM);
- task_setup_data.callback_data = data,
+ task_setup_data.callback_data = data;
data->cred = rpc_lookup_cred();
if (IS_ERR(data->cred)) {
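
The one-character unlink.c fix above swaps a trailing comma for a semicolon. The comma operator made the original line legal C with identical behavior, which is why it compiled silently: both operands are still evaluated in order. Demonstration:

    #include <stdio.h>

    int main(void)
    {
        int x, y;

        x = 1,          /* comma operator: the statement continues... */
        y = 2;          /* ...and both assignments still execute */
        printf("%d %d\n", x, y);    /* prints "1 2" */
        return 0;
    }
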
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 143da2eecd7..21a63da305f 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -50,11 +50,6 @@ enum {
NFSPROC4_CLNT_CB_SEQUENCE,
};
-enum nfs_cb_opnum4 {
- OP_CB_RECALL = 4,
- OP_CB_SEQUENCE = 11,
-};
-
#define NFS4_MAXTAGLEN 20
#define NFS4_enc_cb_null_sz 0
@@ -79,61 +74,6 @@ enum nfs_cb_opnum4 {
cb_sequence_dec_sz + \
op_dec_sz)
-/*
-* Generic encode routines from fs/nfs/nfs4xdr.c
-*/
-static inline __be32 *
-xdr_writemem(__be32 *p, const void *ptr, int nbytes)
-{
- int tmp = XDR_QUADLEN(nbytes);
- if (!tmp)
- return p;
- p[tmp-1] = 0;
- memcpy(p, ptr, nbytes);
- return p + tmp;
-}
-
-#define WRITE32(n) *p++ = htonl(n)
-#define WRITEMEM(ptr,nbytes) do { \
- p = xdr_writemem(p, ptr, nbytes); \
-} while (0)
-#define RESERVE_SPACE(nbytes) do { \
- p = xdr_reserve_space(xdr, nbytes); \
- if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
- BUG_ON(!p); \
-} while (0)
-
-/*
- * Generic decode routines from fs/nfs/nfs4xdr.c
- */
-#define DECODE_TAIL \
- status = 0; \
-out: \
- return status; \
-xdr_error: \
- dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
- status = -EIO; \
- goto out
-
-#define READ32(x) (x) = ntohl(*p++)
-#define READ64(x) do { \
- (x) = (u64)ntohl(*p++) << 32; \
- (x) |= ntohl(*p++); \
-} while (0)
-#define READTIME(x) do { \
- p++; \
- (x.tv_sec) = ntohl(*p++); \
- (x.tv_nsec) = ntohl(*p++); \
-} while (0)
-#define READ_BUF(nbytes) do { \
- p = xdr_inline_decode(xdr, nbytes); \
- if (!p) { \
- dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
- __func__, __LINE__); \
- return -EIO; \
- } \
-} while (0)
-
struct nfs4_cb_compound_hdr {
/* args */
u32 ident; /* minorversion 0 only */
@@ -144,295 +84,513 @@ struct nfs4_cb_compound_hdr {
int status;
};
-static struct {
-int stat;
-int errno;
-} nfs_cb_errtbl[] = {
- { NFS4_OK, 0 },
- { NFS4ERR_PERM, EPERM },
- { NFS4ERR_NOENT, ENOENT },
- { NFS4ERR_IO, EIO },
- { NFS4ERR_NXIO, ENXIO },
- { NFS4ERR_ACCESS, EACCES },
- { NFS4ERR_EXIST, EEXIST },
- { NFS4ERR_XDEV, EXDEV },
- { NFS4ERR_NOTDIR, ENOTDIR },
- { NFS4ERR_ISDIR, EISDIR },
- { NFS4ERR_INVAL, EINVAL },
- { NFS4ERR_FBIG, EFBIG },
- { NFS4ERR_NOSPC, ENOSPC },
- { NFS4ERR_ROFS, EROFS },
- { NFS4ERR_MLINK, EMLINK },
- { NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, ENOTEMPTY },
- { NFS4ERR_DQUOT, EDQUOT },
- { NFS4ERR_STALE, ESTALE },
- { NFS4ERR_BADHANDLE, EBADHANDLE },
- { NFS4ERR_BAD_COOKIE, EBADCOOKIE },
- { NFS4ERR_NOTSUPP, ENOTSUPP },
- { NFS4ERR_TOOSMALL, ETOOSMALL },
- { NFS4ERR_SERVERFAULT, ESERVERFAULT },
- { NFS4ERR_BADTYPE, EBADTYPE },
- { NFS4ERR_LOCKED, EAGAIN },
- { NFS4ERR_RESOURCE, EREMOTEIO },
- { NFS4ERR_SYMLINK, ELOOP },
- { NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, EDEADLK },
- { -1, EIO }
-};
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
-static int
-nfs_cb_stat_to_errno(int stat)
+static __be32 *xdr_encode_empty_array(__be32 *p)
{
- int i;
- for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
- if (nfs_cb_errtbl[i].stat == stat)
- return nfs_cb_errtbl[i].errno;
- }
- /* If we cannot translate the error, the recovery routines should
- * handle it.
- * Note: remaining NFSv4 error codes have values > 10000, so should
- * not conflict with native Linux error codes.
- */
- return stat;
+ *p++ = xdr_zero;
+ return p;
}
/*
- * XDR encode
+ * Encode/decode NFSv4 CB basic data types
+ *
+ * Basic NFSv4 callback data types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section
+ * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
+ * 1 Protocol"
+ */
+
+/*
+ * nfs_cb_opnum4
+ *
+ * enum nfs_cb_opnum4 {
+ * OP_CB_GETATTR = 3,
+ * ...
+ * };
*/
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+ OP_CB_ILLEGAL = 10044
+};
-static void
-encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
__be32 *p;
- RESERVE_SPACE(sizeof(stateid_t));
- WRITE32(sid->si_generation);
- WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
+ p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(op);
}
-static void
-encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+/*
+ * nfs_fh4
+ *
+ * typedef opaque nfs_fh4<NFS4_FHSIZE>;
+ */
+static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
{
- __be32 * p;
+ u32 length = fh->fh_size;
+ __be32 *p;
- RESERVE_SPACE(16);
- WRITE32(0); /* tag length is always 0 */
- WRITE32(hdr->minorversion);
- WRITE32(hdr->ident);
- hdr->nops_p = p;
- WRITE32(hdr->nops);
+ BUG_ON(length > NFS4_FHSIZE);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, &fh->fh_base, length);
}
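
encode_nfs_fh4() above leans on xdr_encode_opaque(), which produces the XDR variable-length opaque layout of RFC 4506, section 4.10: a 4-byte length word, the payload, then zero padding to a 4-byte boundary. A hand-rolled userspace equivalent (illustrative only):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t *encode_opaque(uint32_t *p, const void *data, uint32_t len)
    {
        *p++ = htonl(len);                       /* length word */
        memcpy(p, data, len);                    /* payload */
        memset((char *)p + len, 0, -len & 3);    /* zero pad to 4-byte boundary */
        return p + ((len + 3) >> 2);             /* advance past padded payload */
    }
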
-static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+/*
+ * stateid4
+ *
+ * struct stateid4 {
+ * uint32_t seqid;
+ * opaque other[12];
+ * };
+ */
+static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
- *hdr->nops_p = htonl(hdr->nops);
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(sid->si_generation);
+ xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
}
-static void
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
- struct nfs4_cb_compound_hdr *hdr)
+/*
+ * sessionid4
+ *
+ * typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
+ */
+static void encode_sessionid4(struct xdr_stream *xdr,
+ const struct nfsd4_session *session)
{
__be32 *p;
- int len = dp->dl_fh.fh_size;
-
- RESERVE_SPACE(4);
- WRITE32(OP_CB_RECALL);
- encode_stateid(xdr, &dp->dl_stateid);
- RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
- WRITE32(0); /* truncate optimization not implemented */
- WRITE32(len);
- WRITEMEM(&dp->dl_fh.fh_base, len);
- hdr->nops++;
+
+ p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+ xdr_encode_opaque_fixed(p, session->se_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN);
}
-static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
- struct nfs4_cb_compound_hdr *hdr)
-{
- __be32 *p;
- struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
+/*
+ * nfsstat4
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_cb_errtbl[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -EIO },
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_RESOURCE, -EREMOTEIO },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { -1, -EIO }
+};
- if (hdr->minorversion == 0)
- return;
+/*
+ * If we cannot translate the error, the recovery routines should
+ * handle it.
+ *
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+static int nfs_cb_stat_to_errno(int status)
+{
+ int i;
- RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+ for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
+ if (nfs_cb_errtbl[i].stat == status)
+ return nfs_cb_errtbl[i].errno;
+ }
- WRITE32(OP_CB_SEQUENCE);
- WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
- WRITE32(ses->se_cb_seq_nr);
- WRITE32(0); /* slotid, always 0 */
- WRITE32(0); /* highest slotid always 0 */
- WRITE32(0); /* cachethis always 0 */
- WRITE32(0); /* FIXME: support referring_call_lists */
- hdr->nops++;
+ dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
+ return -status;
}
-static int
-nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
+static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+ enum nfsstat4 *status)
{
- struct xdr_stream xdrs, *xdr = &xdrs;
+ __be32 *p;
+ u32 op;
- xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
- RESERVE_SPACE(0);
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ op = be32_to_cpup(p++);
+ if (unlikely(op != expected))
+ goto out_unexpected;
+ *status = be32_to_cpup(p);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+out_unexpected:
+ dprintk("NFSD: Callback server returned operation %d but "
+ "we issued a request for %d\n", op, expected);
+ return -EIO;
}
-static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
- struct nfsd4_callback *cb)
+/*
+ * CB_COMPOUND4args
+ *
+ * struct CB_COMPOUND4args {
+ * utf8str_cs tag;
+ * uint32_t minorversion;
+ * uint32_t callback_ident;
+ * nfs_cb_argop4 argarray<>;
+ * };
+ */
+static void encode_cb_compound4args(struct xdr_stream *xdr,
+ struct nfs4_cb_compound_hdr *hdr)
{
- struct xdr_stream xdr;
- struct nfs4_delegation *args = cb->cb_op;
- struct nfs4_cb_compound_hdr hdr = {
- .ident = cb->cb_clp->cl_cb_ident,
- .minorversion = cb->cb_minorversion,
- };
+ __be32 * p;
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_cb_compound_hdr(&xdr, &hdr);
- encode_cb_sequence(&xdr, cb, &hdr);
- encode_cb_recall(&xdr, args, &hdr);
- encode_cb_nops(&hdr);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+ p = xdr_encode_empty_array(p); /* empty tag */
+ *p++ = cpu_to_be32(hdr->minorversion);
+ *p++ = cpu_to_be32(hdr->ident);
+
+ hdr->nops_p = p;
+ *p = cpu_to_be32(hdr->nops); /* argarray element count */
+}
+
+/*
+ * Update argarray element count
+ */
+static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+{
+ BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
+ *hdr->nops_p = cpu_to_be32(hdr->nops);
+}
+
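
encode_cb_compound4args() and encode_cb_nops() above implement a two-pass operation count: reserve the argarray length word, remember its address in hdr->nops_p, and backpatch it once every per-op encoder has incremented hdr->nops. Reduced to its essentials (sketch):

    __be32 *count_slot = xdr_reserve_space(xdr, 4);  /* placeholder word */
    u32 nops = 0;

    /* ... each encode_cb_*() helper appends one op and does nops++ ... */

    *count_slot = cpu_to_be32(nops);                 /* backpatch final count */
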
+/*
+ * CB_COMPOUND4res
+ *
+ * struct CB_COMPOUND4res {
+ * nfsstat4 status;
+ * utf8str_cs tag;
+ * nfs_cb_resop4 resarray<>;
+ * };
+ */
+static int decode_cb_compound4res(struct xdr_stream *xdr,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ hdr->status = be32_to_cpup(p++);
+ /* Ignore the tag */
+ length = be32_to_cpup(p++);
+ p = xdr_inline_decode(xdr, length + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ hdr->nops = be32_to_cpup(p);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
+/*
+ * CB_RECALL4args
+ *
+ * struct CB_RECALL4args {
+ * stateid4 stateid;
+ * bool truncate;
+ * nfs_fh4 fh;
+ * };
+ */
+static void encode_cb_recall4args(struct xdr_stream *xdr,
+ const struct nfs4_delegation *dp,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
+ encode_stateid4(xdr, &dp->dl_stateid);
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = xdr_zero; /* truncate */
-static int
-decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
- __be32 *p;
- u32 taglen;
+ encode_nfs_fh4(xdr, &dp->dl_fh);
- READ_BUF(8);
- READ32(hdr->status);
- /* We've got no use for the tag; ignore it: */
- READ32(taglen);
- READ_BUF(taglen + 4);
- p += XDR_QUADLEN(taglen);
- READ32(hdr->nops);
- return 0;
+ hdr->nops++;
}
-static int
-decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+/*
+ * CB_SEQUENCE4args
+ *
+ * struct CB_SEQUENCE4args {
+ * sessionid4 csa_sessionid;
+ * sequenceid4 csa_sequenceid;
+ * slotid4 csa_slotid;
+ * slotid4 csa_highest_slotid;
+ * bool csa_cachethis;
+ * referring_call_list4 csa_referring_call_lists<>;
+ * };
+ */
+static void encode_cb_sequence4args(struct xdr_stream *xdr,
+ const struct nfsd4_callback *cb,
+ struct nfs4_cb_compound_hdr *hdr)
{
+ struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
__be32 *p;
- u32 op;
- int32_t nfserr;
-
- READ_BUF(8);
- READ32(op);
- if (op != expected) {
- dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
- " operation %d but we issued a request for %d\n",
- op, expected);
- return -EIO;
- }
- READ32(nfserr);
- if (nfserr != NFS_OK)
- return -nfs_cb_stat_to_errno(nfserr);
- return 0;
+
+ if (hdr->minorversion == 0)
+ return;
+
+ encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
+ encode_sessionid4(xdr, session);
+
+ p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
+ *p++ = cpu_to_be32(session->se_cb_seq_nr); /* csa_sequenceid */
+ *p++ = xdr_zero; /* csa_slotid */
+ *p++ = xdr_zero; /* csa_highest_slotid */
+ *p++ = xdr_zero; /* csa_cachethis */
+ xdr_encode_empty_array(p); /* csa_referring_call_lists */
+
+ hdr->nops++;
}
/*
+ * CB_SEQUENCE4resok
+ *
+ * struct CB_SEQUENCE4resok {
+ * sessionid4 csr_sessionid;
+ * sequenceid4 csr_sequenceid;
+ * slotid4 csr_slotid;
+ * slotid4 csr_highest_slotid;
+ * slotid4 csr_target_highest_slotid;
+ * };
+ *
+ * union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
+ * case NFS4_OK:
+ * CB_SEQUENCE4resok csr_resok4;
+ * default:
+ * void;
+ * };
+ *
 * Our current backchannel implementation supports a single backchannel
* with a single slot.
*/
-static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
- struct rpc_rqst *rqstp)
+static int decode_cb_sequence4resok(struct xdr_stream *xdr,
+ struct nfsd4_callback *cb)
{
- struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
+ struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
struct nfs4_sessionid id;
int status;
- u32 dummy;
__be32 *p;
+ u32 dummy;
- if (cb->cb_minorversion == 0)
- return 0;
-
- status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
- if (status)
- return status;
+ status = -ESERVERFAULT;
/*
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
- status = -ESERVERFAULT;
-
- READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+ p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
- p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
- if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
- dprintk("%s Invalid session id\n", __func__);
+ if (memcmp(id.data, session->se_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN) != 0) {
+ dprintk("NFS: %s Invalid session id\n", __func__);
goto out;
}
- READ32(dummy);
- if (dummy != ses->se_cb_seq_nr) {
- dprintk("%s Invalid sequence number\n", __func__);
+ p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
+
+ dummy = be32_to_cpup(p++);
+ if (dummy != session->se_cb_seq_nr) {
+ dprintk("NFS: %s Invalid sequence number\n", __func__);
goto out;
}
- READ32(dummy); /* slotid must be 0 */
+
+ dummy = be32_to_cpup(p++);
if (dummy != 0) {
- dprintk("%s Invalid slotid\n", __func__);
+ dprintk("NFS: %s Invalid slotid\n", __func__);
goto out;
}
- /* FIXME: process highest slotid and target highest slotid */
+
+ /*
+ * FIXME: process highest slotid and target highest slotid
+ */
status = 0;
out:
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
+static int decode_cb_sequence4res(struct xdr_stream *xdr,
+ struct nfsd4_callback *cb)
+{
+ enum nfsstat4 nfserr;
+ int status;
+
+ if (cb->cb_minorversion == 0)
+ return 0;
+
+ status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
+ if (unlikely(status))
+ goto out;
+ if (unlikely(nfserr != NFS4_OK))
+ goto out_default;
+ status = decode_cb_sequence4resok(xdr, cb);
+out:
+ return status;
+out_default:
+	return nfs_cb_stat_to_errno(nfserr);
+}
-static int
-nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
+/*
+ * NFSv4.0 and NFSv4.1 XDR encode functions
+ *
+ * NFSv4.0 callback argument types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+/*
+ * NB: Without this zero space reservation, callbacks over krb5p fail
+ */
+static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+ void *__unused)
+{
+ xdr_reserve_space(xdr, 0);
+}
+
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfsd4_callback *cb)
+{
+ const struct nfs4_delegation *args = cb->cb_op;
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_recall4args(xdr, args, &hdr);
+ encode_cb_nops(&hdr);
+}
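
As a sketch (not part of the patch), the COMPOUND built here for an NFSv4.1 recall looks like the following; for minorversion 0 the sequence op is assumed to be skipped by encode_cb_sequence4args, mirroring decode_cb_sequence4res, so nops would be 1:

    /*
     * CB_COMPOUND4args
     *   tag            (assumed zero-length)
     *   minorversion   = 1
     *   callback_ident = cl_cb_ident
     *   nops           = 2
     *     op[0]: OP_CB_SEQUENCE  (CB_SEQUENCE4args)
     *     op[1]: OP_CB_RECALL    (CB_RECALL4args)
     */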
+
+
+/*
+ * NFSv4.0 and NFSv4.1 XDR decode functions
+ *
+ * NFSv4.0 callback result types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol"; NFSv4.1
+ * callback result types are defined in section 20 of RFC 5661:
+ * "Network File System (NFS) Version 4 Minor Version 1 Protocol".
+ */
+
+static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+ void *__unused)
{
return 0;
}
-static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
- struct nfsd4_callback *cb)
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfsd4_callback *cb)
{
- struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr;
+ enum nfsstat4 nfserr;
int status;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- status = decode_cb_compound_hdr(&xdr, &hdr);
- if (status)
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
goto out;
- if (cb) {
- status = decode_cb_sequence(&xdr, cb, rqstp);
- if (status)
+
+ if (cb != NULL) {
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status))
goto out;
}
- status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
+
+ status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
+ if (unlikely(status))
+ goto out;
+ if (unlikely(nfserr != NFS4_OK))
+ goto out_default;
out:
return status;
+out_default:
+ return nfs_cb_stat_to_errno(nfserr);
}
/*
* RPC procedure tables
*/
-#define PROC(proc, call, argtype, restype) \
-[NFSPROC4_CLNT_##proc] = { \
- .p_proc = NFSPROC4_CB_##call, \
- .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
- .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
- .p_arglen = NFS4_##argtype##_sz, \
- .p_replen = NFS4_##restype##_sz, \
- .p_statidx = NFSPROC4_CB_##call, \
- .p_name = #proc, \
-}
-
-static struct rpc_procinfo nfs4_cb_procedures[] = {
- PROC(CB_NULL, NULL, enc_cb_null, dec_cb_null),
- PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall),
+#define PROC(proc, call, argtype, restype) \
+[NFSPROC4_CLNT_##proc] = { \
+ .p_proc = NFSPROC4_CB_##call, \
+ .p_encode = (kxdreproc_t)nfs4_xdr_enc_##argtype, \
+ .p_decode = (kxdrdproc_t)nfs4_xdr_dec_##restype, \
+ .p_arglen = NFS4_enc_##argtype##_sz, \
+ .p_replen = NFS4_dec_##restype##_sz, \
+ .p_statidx = NFSPROC4_CB_##call, \
+ .p_name = #proc, \
+}
+
+static struct rpc_procinfo nfs4_cb_procedures[] = {
+ PROC(CB_NULL, NULL, cb_null, cb_null),
+ PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
};
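
Expanding PROC() by hand for the CB_RECALL entry shows what the table actually contains (a mechanical expansion of the macro above):

    [NFSPROC4_CLNT_CB_RECALL] = {
            .p_proc    = NFSPROC4_CB_COMPOUND,
            .p_encode  = (kxdreproc_t)nfs4_xdr_enc_cb_recall,
            .p_decode  = (kxdrdproc_t)nfs4_xdr_dec_cb_recall,
            .p_arglen  = NFS4_enc_cb_recall_sz,
            .p_replen  = NFS4_dec_cb_recall_sz,
            .p_statidx = NFSPROC4_CB_COMPOUND,
            .p_name    = "CB_RECALL",
    },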
-static struct rpc_version nfs_cb_version4 = {
+static struct rpc_version nfs_cb_version4 = {
/*
* Note on the callback rpc program version number: despite language in rfc
* 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -440,29 +598,29 @@ static struct rpc_version nfs_cb_version4 = {
* in practice that appears to be what implementations use. The section
* 18.36.3 language is expected to be fixed in an erratum.
*/
- .number = 1,
- .nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
- .procs = nfs4_cb_procedures
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
+ .procs = nfs4_cb_procedures
};
-static struct rpc_version * nfs_cb_version[] = {
+static struct rpc_version *nfs_cb_version[] = {
&nfs_cb_version4,
};
static struct rpc_program cb_program;
static struct rpc_stat cb_stats = {
- .program = &cb_program
+ .program = &cb_program
};
#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
- .name = "nfs4_cb",
- .number = NFS4_CALLBACK,
- .nrvers = ARRAY_SIZE(nfs_cb_version),
- .version = nfs_cb_version,
- .stats = &cb_stats,
- .pipe_dir_name = "/nfsd4_cb",
+ .name = "nfs4_cb",
+ .number = NFS4_CALLBACK,
+ .nrvers = ARRAY_SIZE(nfs_cb_version),
+ .version = nfs_cb_version,
+ .stats = &cb_stats,
+ .pipe_dir_name = "/nfsd4_cb",
};
static int max_cb_time(void)
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 0d840669698..ab152c00cd3 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -51,7 +51,7 @@ config OCFS2_FS_USERSPACE_CLUSTER
config OCFS2_FS_STATS
bool "OCFS2 statistics"
- depends on OCFS2_FS
+ depends on OCFS2_FS && DEBUG_FS
default y
help
This option allows some fs statistics to be captured. Enabling
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 592fae5007d..e4984e259cb 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -565,7 +565,6 @@ static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
return ret;
}
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
struct ocfs2_extent_block *eb);
static void ocfs2_adjust_rightmost_records(handle_t *handle,
@@ -5858,6 +5857,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, tl_bh);
+ osb->truncated_clusters += num_clusters;
bail:
mlog_exit(status);
return status;
@@ -5929,6 +5929,8 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
i--;
}
+ osb->truncated_clusters = 0;
+
bail:
mlog_exit(status);
return status;
@@ -7139,64 +7141,6 @@ bail:
}
/*
- * Expects the inode to already be locked.
- */
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_truncate_context **tc)
-{
- int status;
- unsigned int new_i_clusters;
- struct ocfs2_dinode *fe;
- struct ocfs2_extent_block *eb;
- struct buffer_head *last_eb_bh = NULL;
-
- mlog_entry_void();
-
- *tc = NULL;
-
- new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
- i_size_read(inode));
- fe = (struct ocfs2_dinode *) fe_bh->b_data;
-
- mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
- "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
- (unsigned long long)le64_to_cpu(fe->i_size));
-
- *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
- if (!(*tc)) {
- status = -ENOMEM;
- mlog_errno(status);
- goto bail;
- }
- ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
-
- if (fe->id2.i_list.l_tree_depth) {
- status = ocfs2_read_extent_block(INODE_CACHE(inode),
- le64_to_cpu(fe->i_last_eb_blk),
- &last_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
- }
-
- (*tc)->tc_last_eb_bh = last_eb_bh;
-
- status = 0;
-bail:
- if (status < 0) {
- if (*tc)
- ocfs2_free_truncate_context(*tc);
- *tc = NULL;
- }
- mlog_exit_void();
- return status;
-}
-
-/*
* 'start' is inclusive, 'end' is not.
*/
int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
@@ -7270,18 +7214,3 @@ out_commit:
out:
return ret;
}
-
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
-{
- /*
- * The caller is responsible for completing deallocation
- * before freeing the context.
- */
- if (tc->tc_dealloc.c_first_suballocator != NULL)
- mlog(ML_NOTICE,
- "Truncate completion has non-empty dealloc context\n");
-
- brelse(tc->tc_last_eb_bh);
-
- kfree(tc);
-}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 55762b554b9..3bd08a03251 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -228,10 +228,6 @@ struct ocfs2_truncate_context {
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end);
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_truncate_context **tc);
int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d7c5540ad6..1fbb0e20131 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1630,6 +1630,43 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
return ret;
}
+/*
+ * Try to flush the truncate log if doing so can free enough clusters.
+ * Return value: "< 0" means error, "0" means no space was freed, and
+ * "1" means enough clusters were freed for the caller to retry the
+ * allocation.
+ */
+static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+ unsigned int needed)
+{
+ tid_t target;
+ int ret = 0;
+ unsigned int truncated_clusters;
+
+ mutex_lock(&osb->osb_tl_inode->i_mutex);
+ truncated_clusters = osb->truncated_clusters;
+ mutex_unlock(&osb->osb_tl_inode->i_mutex);
+
+ /*
+ * Check whether we can succeed in allocating if we free
+ * the truncate log.
+ */
+ if (truncated_clusters < needed)
+ goto out;
+
+ ret = ocfs2_flush_truncate_log(osb);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+ jbd2_log_wait_commit(osb->journal->j_journal, target);
+ ret = 1;
+ }
+out:
+ return ret;
+}
+
int ocfs2_write_begin_nolock(struct file *filp,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -1637,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
struct buffer_head *di_bh, struct page *mmap_page)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
- unsigned int clusters_to_alloc, extents_to_split;
+ unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
struct ocfs2_write_ctxt *wc;
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -1646,7 +1683,9 @@ int ocfs2_write_begin_nolock(struct file *filp,
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle;
struct ocfs2_extent_tree et;
+ int try_free = 1, ret1;
+try_again:
ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
if (ret) {
mlog_errno(ret);
@@ -1681,6 +1720,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
mlog_errno(ret);
goto out;
} else if (ret == 1) {
+ clusters_need = wc->w_clen;
ret = ocfs2_refcount_cow(inode, filp, di_bh,
wc->w_cpos, wc->w_clen, UINT_MAX);
if (ret) {
@@ -1695,6 +1735,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
mlog_errno(ret);
goto out;
}
+ clusters_need += clusters_to_alloc;
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
@@ -1817,6 +1858,22 @@ out:
ocfs2_free_alloc_context(data_ac);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
+
+ if (ret == -ENOSPC && try_free) {
+ /*
+ * Try to free space from the truncate log so that we have
+ * enough clusters to allocate.
+ */
+ try_free = 0;
+
+ ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
+ if (ret1 == 1)
+ goto try_again;
+
+ if (ret1 < 0)
+ mlog_errno(ret1);
+ }
+
return ret;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9e3d45bcb5f..a6cc05302e9 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -82,6 +82,7 @@ static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
#define O2HB_DB_TYPE_REGION_LIVENODES 4
#define O2HB_DB_TYPE_REGION_NUMBER 5
#define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6
+#define O2HB_DB_TYPE_REGION_PINNED 7
struct o2hb_debug_buf {
int db_type;
int db_size;
@@ -101,6 +102,7 @@ static struct o2hb_debug_buf *o2hb_db_failedregions;
#define O2HB_DEBUG_FAILEDREGIONS "failed_regions"
#define O2HB_DEBUG_REGION_NUMBER "num"
#define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms"
+#define O2HB_DEBUG_REGION_PINNED "pinned"
static struct dentry *o2hb_debug_dir;
static struct dentry *o2hb_debug_livenodes;
@@ -132,6 +134,33 @@ char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
+/*
+ * o2hb_dependent_users tracks the number of registered callbacks that depend
+ * on heartbeat. o2net and o2dlm are two entities that register this callback.
+ * However, only o2dlm truly depends on the heartbeat: it must not allow
+ * the heartbeat to stop while a dlm domain is still active.
+ */
+unsigned int o2hb_dependent_users;
+
+/*
+ * In global heartbeat mode, all regions are pinned if there are one or more
+ * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
+ * regions are unpinned if the region count exceeds the cut off or the number
+ * of dependent users falls to zero.
+ */
+#define O2HB_PIN_CUT_OFF 3
+
+/*
+ * In local heartbeat mode, we assume the dlm domain name to be the same as
+ * region uuid. This is true for domains created for the file system but not
+ * necessarily true for userdlm domains. This is a known limitation.
+ *
+ * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
+ * works for both file system and userdlm domains.
+ */
+static int o2hb_region_pin(const char *region_uuid);
+static void o2hb_region_unpin(const char *region_uuid);
+
/* Only sets a new threshold if there are no active regions.
*
* No locking or otherwise interesting code is required for reading
@@ -186,7 +215,9 @@ struct o2hb_region {
struct config_item hr_item;
struct list_head hr_all_item;
- unsigned hr_unclean_stop:1;
+ unsigned hr_unclean_stop:1,
+ hr_item_pinned:1,
+ hr_item_dropped:1;
/* protected by the hr_callback_sem */
struct task_struct *hr_task;
@@ -212,9 +243,11 @@ struct o2hb_region {
struct dentry *hr_debug_livenodes;
struct dentry *hr_debug_regnum;
struct dentry *hr_debug_elapsed_time;
+ struct dentry *hr_debug_pinned;
struct o2hb_debug_buf *hr_db_livenodes;
struct o2hb_debug_buf *hr_db_regnum;
struct o2hb_debug_buf *hr_db_elapsed_time;
+ struct o2hb_debug_buf *hr_db_pinned;
/* let the person setting up hb wait for it to return until it
* has reached a 'steady' state. This will be fixed when we have
@@ -701,6 +734,14 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
config_item_name(&reg->hr_item));
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+
+ /*
+ * If global heartbeat is active, unpin all regions once the
+ * quorum region count exceeds O2HB_PIN_CUT_OFF.
+ */
+ if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+ O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
+ o2hb_region_unpin(NULL);
}
static int o2hb_check_slot(struct o2hb_region *reg,
@@ -1041,6 +1082,9 @@ static int o2hb_thread(void *data)
set_user_nice(current, -20);
+ /* Pin node */
+ o2nm_depend_this_node();
+
while (!kthread_should_stop() && !reg->hr_unclean_stop) {
/* We track the time spent inside
* o2hb_do_disk_heartbeat so that we avoid more than
@@ -1090,6 +1134,9 @@ static int o2hb_thread(void *data)
mlog_errno(ret);
}
+ /* Unpin node */
+ o2nm_undepend_this_node();
+
mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
return 0;
@@ -1142,6 +1189,12 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
reg->hr_last_timeout_start));
goto done;
+ case O2HB_DB_TYPE_REGION_PINNED:
+ reg = (struct o2hb_region *)db->db_data;
+ out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
+ !!reg->hr_item_pinned);
+ goto done;
+
default:
goto done;
}
@@ -1315,6 +1368,8 @@ int o2hb_init(void)
memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));
+ o2hb_dependent_users = 0;
+
return o2hb_debug_init();
}
@@ -1384,6 +1439,7 @@ static void o2hb_region_release(struct config_item *item)
debugfs_remove(reg->hr_debug_livenodes);
debugfs_remove(reg->hr_debug_regnum);
debugfs_remove(reg->hr_debug_elapsed_time);
+ debugfs_remove(reg->hr_debug_pinned);
debugfs_remove(reg->hr_debug_dir);
spin_lock(&o2hb_live_lock);
@@ -1948,6 +2004,18 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
goto bail;
}
+ reg->hr_debug_pinned =
+ o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
+ reg->hr_debug_dir,
+ &(reg->hr_db_pinned),
+ sizeof(*(reg->hr_db_pinned)),
+ O2HB_DB_TYPE_REGION_PINNED,
+ 0, 0, reg);
+ if (!reg->hr_debug_pinned) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
ret = 0;
bail:
return ret;
@@ -2002,15 +2070,20 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
{
struct task_struct *hb_task;
struct o2hb_region *reg = to_o2hb_region(item);
+ int quorum_region = 0;
/* stop the thread when the user removes the region dir */
spin_lock(&o2hb_live_lock);
if (o2hb_global_heartbeat_active()) {
clear_bit(reg->hr_region_num, o2hb_region_bitmap);
clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ quorum_region = 1;
+ clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
}
hb_task = reg->hr_task;
reg->hr_task = NULL;
+ reg->hr_item_dropped = 1;
spin_unlock(&o2hb_live_lock);
if (hb_task)
@@ -2028,7 +2101,27 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
if (o2hb_global_heartbeat_active())
printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
config_item_name(&reg->hr_item));
+
config_item_put(item);
+
+ if (!o2hb_global_heartbeat_active() || !quorum_region)
+ return;
+
+ /*
+ * If global heartbeat is active and there are dependent users,
+ * pin all regions while the quorum region count is <= O2HB_PIN_CUT_OFF.
+ */
+ spin_lock(&o2hb_live_lock);
+
+ if (!o2hb_dependent_users)
+ goto unlock;
+
+ if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+ O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+ o2hb_region_pin(NULL);
+
+unlock:
+ spin_unlock(&o2hb_live_lock);
}
struct o2hb_heartbeat_group_attribute {
@@ -2214,63 +2307,138 @@ void o2hb_setup_callback(struct o2hb_callback_func *hc,
}
EXPORT_SYMBOL_GPL(o2hb_setup_callback);
-static struct o2hb_region *o2hb_find_region(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only pin the matching region. In global we pin all the active
+ * regions.
+ */
+static int o2hb_region_pin(const char *region_uuid)
{
- struct o2hb_region *p, *reg = NULL;
+ int ret = 0, found = 0;
+ struct o2hb_region *reg;
+ char *uuid;
assert_spin_locked(&o2hb_live_lock);
- list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
- if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
- reg = p;
- break;
+ list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+ uuid = config_item_name(&reg->hr_item);
+
+ /* local heartbeat */
+ if (region_uuid) {
+ if (strcmp(region_uuid, uuid))
+ continue;
+ found = 1;
+ }
+
+ if (reg->hr_item_pinned || reg->hr_item_dropped)
+ goto skip_pin;
+
+ /* Ignore ENOENT only for local hb (userdlm domain) */
+ ret = o2nm_depend_item(&reg->hr_item);
+ if (!ret) {
+ mlog(ML_CLUSTER, "Pin region %s\n", uuid);
+ reg->hr_item_pinned = 1;
+ } else {
+ if (ret == -ENOENT && found)
+ ret = 0;
+ else {
+ mlog(ML_ERROR, "Pin region %s fails with %d\n",
+ uuid, ret);
+ break;
+ }
}
+skip_pin:
+ if (found)
+ break;
}
- return reg;
+ return ret;
}
-static int o2hb_region_get(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only unpin the matching region. In global we unpin all the
+ * active regions.
+ */
+static void o2hb_region_unpin(const char *region_uuid)
{
- int ret = 0;
struct o2hb_region *reg;
+ char *uuid;
+ int found = 0;
- spin_lock(&o2hb_live_lock);
+ assert_spin_locked(&o2hb_live_lock);
- reg = o2hb_find_region(region_uuid);
- if (!reg)
- ret = -ENOENT;
- spin_unlock(&o2hb_live_lock);
+ list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+ uuid = config_item_name(&reg->hr_item);
+ if (region_uuid) {
+ if (strcmp(region_uuid, uuid))
+ continue;
+ found = 1;
+ }
- if (ret)
- goto out;
+ if (reg->hr_item_pinned) {
+ mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
+ o2nm_undepend_item(&reg->hr_item);
+ reg->hr_item_pinned = 0;
+ }
+ if (found)
+ break;
+ }
+}
- ret = o2nm_depend_this_node();
- if (ret)
- goto out;
+static int o2hb_region_inc_user(const char *region_uuid)
+{
+ int ret = 0;
- ret = o2nm_depend_item(&reg->hr_item);
- if (ret)
- o2nm_undepend_this_node();
+ spin_lock(&o2hb_live_lock);
-out:
+ /* local heartbeat */
+ if (!o2hb_global_heartbeat_active()) {
+ ret = o2hb_region_pin(region_uuid);
+ goto unlock;
+ }
+
+ /*
+ * If global heartbeat is active and this is the first dependent user,
+ * pin all regions while the quorum region count is <= O2HB_PIN_CUT_OFF.
+ */
+ o2hb_dependent_users++;
+ if (o2hb_dependent_users > 1)
+ goto unlock;
+
+ if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+ O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+ ret = o2hb_region_pin(NULL);
+
+unlock:
+ spin_unlock(&o2hb_live_lock);
return ret;
}
-static void o2hb_region_put(const char *region_uuid)
+void o2hb_region_dec_user(const char *region_uuid)
{
- struct o2hb_region *reg;
-
spin_lock(&o2hb_live_lock);
- reg = o2hb_find_region(region_uuid);
+ /* local heartbeat */
+ if (!o2hb_global_heartbeat_active()) {
+ o2hb_region_unpin(region_uuid);
+ goto unlock;
+ }
- spin_unlock(&o2hb_live_lock);
+ /*
+ * If global heartbeat is active and there are no dependent users
+ * left, unpin all quorum regions.
+ */
+ o2hb_dependent_users--;
+ if (!o2hb_dependent_users)
+ o2hb_region_unpin(NULL);
- if (reg) {
- o2nm_undepend_item(&reg->hr_item);
- o2nm_undepend_this_node();
- }
+unlock:
+ spin_unlock(&o2hb_live_lock);
}
int o2hb_register_callback(const char *region_uuid,
@@ -2291,9 +2459,11 @@ int o2hb_register_callback(const char *region_uuid,
}
if (region_uuid) {
- ret = o2hb_region_get(region_uuid);
- if (ret)
+ ret = o2hb_region_inc_user(region_uuid);
+ if (ret) {
+ mlog_errno(ret);
goto out;
+ }
}
down_write(&o2hb_callback_sem);
@@ -2311,7 +2481,7 @@ int o2hb_register_callback(const char *region_uuid,
up_write(&o2hb_callback_sem);
ret = 0;
out:
- mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
+ mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
ret, __builtin_return_address(0), hc);
return ret;
}
@@ -2322,7 +2492,7 @@ void o2hb_unregister_callback(const char *region_uuid,
{
BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
- mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
+ mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
__builtin_return_address(0), hc);
/* XXX Can this happen _with_ a region reference? */
@@ -2330,7 +2500,7 @@ void o2hb_unregister_callback(const char *region_uuid,
return;
if (region_uuid)
- o2hb_region_put(region_uuid);
+ o2hb_region_dec_user(region_uuid);
down_write(&o2hb_callback_sem);
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index a3f150e52b0..3a5835904b3 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -46,10 +46,15 @@
#define O2NET_DEBUG_DIR "o2net"
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
+#define STATS_DEBUG_NAME "stats"
+
+#define SHOW_SOCK_CONTAINERS 0
+#define SHOW_SOCK_STATS 1
static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
+static struct dentry *stats_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
@@ -123,37 +128,42 @@ static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static int nst_seq_show(struct seq_file *seq, void *v)
{
struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+ ktime_t now;
+ s64 sock, send, status;
spin_lock(&o2net_debug_lock);
nst = next_nst(dummy_nst);
+ if (!nst)
+ goto out;
- if (nst != NULL) {
- /* get_task_comm isn't exported. oh well. */
- seq_printf(seq, "%p:\n"
- " pid: %lu\n"
- " tgid: %lu\n"
- " process name: %s\n"
- " node: %u\n"
- " sc: %p\n"
- " message id: %d\n"
- " message type: %u\n"
- " message key: 0x%08x\n"
- " sock acquiry: %lu.%ld\n"
- " send start: %lu.%ld\n"
- " wait start: %lu.%ld\n",
- nst, (unsigned long)nst->st_task->pid,
- (unsigned long)nst->st_task->tgid,
- nst->st_task->comm, nst->st_node,
- nst->st_sc, nst->st_id, nst->st_msg_type,
- nst->st_msg_key,
- nst->st_sock_time.tv_sec,
- (long)nst->st_sock_time.tv_usec,
- nst->st_send_time.tv_sec,
- (long)nst->st_send_time.tv_usec,
- nst->st_status_time.tv_sec,
- (long)nst->st_status_time.tv_usec);
- }
+ now = ktime_get();
+ sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
+ send = ktime_to_us(ktime_sub(now, nst->st_send_time));
+ status = ktime_to_us(ktime_sub(now, nst->st_status_time));
+
+ /* get_task_comm isn't exported. oh well. */
+ seq_printf(seq, "%p:\n"
+ " pid: %lu\n"
+ " tgid: %lu\n"
+ " process name: %s\n"
+ " node: %u\n"
+ " sc: %p\n"
+ " message id: %d\n"
+ " message type: %u\n"
+ " message key: 0x%08x\n"
+ " sock acquiry: %lld usecs ago\n"
+ " send start: %lld usecs ago\n"
+ " wait start: %lld usecs ago\n",
+ nst, (unsigned long)task_pid_nr(nst->st_task),
+ (unsigned long)nst->st_task->tgid,
+ nst->st_task->comm, nst->st_node,
+ nst->st_sc, nst->st_id, nst->st_msg_type,
+ nst->st_msg_key,
+ (long long)sock,
+ (long long)send,
+ (long long)status);
+out:
spin_unlock(&o2net_debug_lock);
return 0;
@@ -228,6 +238,11 @@ void o2net_debug_del_sc(struct o2net_sock_container *sc)
spin_unlock(&o2net_debug_lock);
}
+struct o2net_sock_debug {
+ int dbg_ctxt;
+ struct o2net_sock_container *dbg_sock;
+};
+
static struct o2net_sock_container
*next_sc(struct o2net_sock_container *sc_start)
{
@@ -253,7 +268,8 @@ static struct o2net_sock_container
static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct o2net_sock_container *sc, *dummy_sc = seq->private;
+ struct o2net_sock_debug *sd = seq->private;
+ struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock(&o2net_debug_lock);
sc = next_sc(dummy_sc);
@@ -264,7 +280,8 @@ static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct o2net_sock_container *sc, *dummy_sc = seq->private;
+ struct o2net_sock_debug *sd = seq->private;
+ struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock(&o2net_debug_lock);
sc = next_sc(dummy_sc);
@@ -276,65 +293,107 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return sc; /* unused, just needs to be null when done */
}
-#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec
+#ifdef CONFIG_OCFS2_FS_STATS
+# define sc_send_count(_s) ((_s)->sc_send_count)
+# define sc_recv_count(_s) ((_s)->sc_recv_count)
+# define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total))
+# define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total))
+# define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total))
+# define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total))
+#else
+# define sc_send_count(_s) (0U)
+# define sc_recv_count(_s) (0U)
+# define sc_tv_acquiry_total_ns(_s) (0LL)
+# define sc_tv_send_total_ns(_s) (0LL)
+# define sc_tv_status_total_ns(_s) (0LL)
+# define sc_tv_process_total_ns(_s) (0LL)
+#endif
+
+/* So that debugfs.ocfs2 can determine which format is being used */
+#define O2NET_STATS_STR_VERSION 1
+static void sc_show_sock_stats(struct seq_file *seq,
+ struct o2net_sock_container *sc)
+{
+ if (!sc)
+ return;
+
+ seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
+ sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
+ (long long)sc_tv_acquiry_total_ns(sc),
+ (long long)sc_tv_send_total_ns(sc),
+ (long long)sc_tv_status_total_ns(sc),
+ (unsigned long)sc_recv_count(sc),
+ (long long)sc_tv_process_total_ns(sc));
+}
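
Given the format string above, each stats line carries the version, node number, send count, the three accumulated send-side times (in ns), receive count, and total processing time (in ns). A hypothetical consumer sketch -- the sample values are invented -- could parse one line with:

    char line[] = "1,0,2816,38114321,9088111,18273645,2901,77162503";
    int ver;
    unsigned int node;
    unsigned long sends, recvs;
    long long acquiry_ns, send_ns, status_ns, process_ns;

    if (sscanf(line, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld",
               &ver, &node, &sends, &acquiry_ns, &send_ns,
               &status_ns, &recvs, &process_ns) == 8 &&
        ver == O2NET_STATS_STR_VERSION) {
            /* all eight fields parsed and the format version matches */
    }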
+
+static void sc_show_sock_container(struct seq_file *seq,
+ struct o2net_sock_container *sc)
+{
+ struct inet_sock *inet = NULL;
+ __be32 saddr = 0, daddr = 0;
+ __be16 sport = 0, dport = 0;
+
+ if (!sc)
+ return;
+
+ if (sc->sc_sock) {
+ inet = inet_sk(sc->sc_sock->sk);
+ /* the stack's structs aren't sparse endian clean */
+ saddr = (__force __be32)inet->inet_saddr;
+ daddr = (__force __be32)inet->inet_daddr;
+ sport = (__force __be16)inet->inet_sport;
+ dport = (__force __be16)inet->inet_dport;
+ }
+
+ /* XXX sigh, inet-> doesn't have sparse annotation so any
+ * use of it here generates a warning with -Wbitwise */
+ seq_printf(seq, "%p:\n"
+ " krefs: %d\n"
+ " sock: %pI4:%u -> "
+ "%pI4:%u\n"
+ " remote node: %s\n"
+ " page off: %zu\n"
+ " handshake ok: %u\n"
+ " timer: %lld usecs\n"
+ " data ready: %lld usecs\n"
+ " advance start: %lld usecs\n"
+ " advance stop: %lld usecs\n"
+ " func start: %lld usecs\n"
+ " func stop: %lld usecs\n"
+ " func key: 0x%08x\n"
+ " func type: %u\n",
+ sc,
+ atomic_read(&sc->sc_kref.refcount),
+ &saddr, inet ? ntohs(sport) : 0,
+ &daddr, inet ? ntohs(dport) : 0,
+ sc->sc_node->nd_name,
+ sc->sc_page_off,
+ sc->sc_handshake_ok,
+ (long long)ktime_to_us(sc->sc_tv_timer),
+ (long long)ktime_to_us(sc->sc_tv_data_ready),
+ (long long)ktime_to_us(sc->sc_tv_advance_start),
+ (long long)ktime_to_us(sc->sc_tv_advance_stop),
+ (long long)ktime_to_us(sc->sc_tv_func_start),
+ (long long)ktime_to_us(sc->sc_tv_func_stop),
+ sc->sc_msg_key,
+ sc->sc_msg_type);
+}
static int sc_seq_show(struct seq_file *seq, void *v)
{
- struct o2net_sock_container *sc, *dummy_sc = seq->private;
+ struct o2net_sock_debug *sd = seq->private;
+ struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
spin_lock(&o2net_debug_lock);
sc = next_sc(dummy_sc);
- if (sc != NULL) {
- struct inet_sock *inet = NULL;
-
- __be32 saddr = 0, daddr = 0;
- __be16 sport = 0, dport = 0;
-
- if (sc->sc_sock) {
- inet = inet_sk(sc->sc_sock->sk);
- /* the stack's structs aren't sparse endian clean */
- saddr = (__force __be32)inet->inet_saddr;
- daddr = (__force __be32)inet->inet_daddr;
- sport = (__force __be16)inet->inet_sport;
- dport = (__force __be16)inet->inet_dport;
- }
-
- /* XXX sigh, inet-> doesn't have sparse annotation so any
- * use of it here generates a warning with -Wbitwise */
- seq_printf(seq, "%p:\n"
- " krefs: %d\n"
- " sock: %pI4:%u -> "
- "%pI4:%u\n"
- " remote node: %s\n"
- " page off: %zu\n"
- " handshake ok: %u\n"
- " timer: %lu.%ld\n"
- " data ready: %lu.%ld\n"
- " advance start: %lu.%ld\n"
- " advance stop: %lu.%ld\n"
- " func start: %lu.%ld\n"
- " func stop: %lu.%ld\n"
- " func key: %u\n"
- " func type: %u\n",
- sc,
- atomic_read(&sc->sc_kref.refcount),
- &saddr, inet ? ntohs(sport) : 0,
- &daddr, inet ? ntohs(dport) : 0,
- sc->sc_node->nd_name,
- sc->sc_page_off,
- sc->sc_handshake_ok,
- TV_SEC_USEC(sc->sc_tv_timer),
- TV_SEC_USEC(sc->sc_tv_data_ready),
- TV_SEC_USEC(sc->sc_tv_advance_start),
- TV_SEC_USEC(sc->sc_tv_advance_stop),
- TV_SEC_USEC(sc->sc_tv_func_start),
- TV_SEC_USEC(sc->sc_tv_func_stop),
- sc->sc_msg_key,
- sc->sc_msg_type);
+ if (sc) {
+ if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
+ sc_show_sock_container(seq, sc);
+ else
+ sc_show_sock_stats(seq, sc);
}
-
spin_unlock(&o2net_debug_lock);
return 0;
@@ -351,7 +410,7 @@ static const struct seq_operations sc_seq_ops = {
.show = sc_seq_show,
};
-static int sc_fop_open(struct inode *inode, struct file *file)
+static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
{
struct o2net_sock_container *dummy_sc;
struct seq_file *seq;
@@ -369,7 +428,8 @@ static int sc_fop_open(struct inode *inode, struct file *file)
goto out;
seq = file->private_data;
- seq->private = dummy_sc;
+ seq->private = sd;
+ sd->dbg_sock = dummy_sc;
o2net_debug_add_sc(dummy_sc);
dummy_sc = NULL;
@@ -382,12 +442,48 @@ out:
static int sc_fop_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
- struct o2net_sock_container *dummy_sc = seq->private;
+ struct o2net_sock_debug *sd = seq->private;
+ struct o2net_sock_container *dummy_sc = sd->dbg_sock;
o2net_debug_del_sc(dummy_sc);
return seq_release_private(inode, file);
}
+static int stats_fop_open(struct inode *inode, struct file *file)
+{
+ struct o2net_sock_debug *sd;
+
+ sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+ if (sd == NULL)
+ return -ENOMEM;
+
+ sd->dbg_ctxt = SHOW_SOCK_STATS;
+ sd->dbg_sock = NULL;
+
+ return sc_common_open(file, sd);
+}
+
+static const struct file_operations stats_seq_fops = {
+ .open = stats_fop_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = sc_fop_release,
+};
+
+static int sc_fop_open(struct inode *inode, struct file *file)
+{
+ struct o2net_sock_debug *sd;
+
+ sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+ if (sd == NULL)
+ return -ENOMEM;
+
+ sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
+ sd->dbg_sock = NULL;
+
+ return sc_common_open(file, sd);
+}
+
static const struct file_operations sc_seq_fops = {
.open = sc_fop_open,
.read = seq_read,
@@ -419,25 +515,29 @@ int o2net_debugfs_init(void)
goto bail;
}
+ stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
+ o2net_dentry, NULL,
+ &stats_seq_fops);
+ if (!stats_dentry) {
+ mlog_errno(-ENOMEM);
+ goto bail;
+ }
+
return 0;
bail:
- if (sc_dentry)
- debugfs_remove(sc_dentry);
- if (nst_dentry)
- debugfs_remove(nst_dentry);
- if (o2net_dentry)
- debugfs_remove(o2net_dentry);
+ debugfs_remove(stats_dentry);
+ debugfs_remove(sc_dentry);
+ debugfs_remove(nst_dentry);
+ debugfs_remove(o2net_dentry);
return -ENOMEM;
}
void o2net_debugfs_exit(void)
{
- if (sc_dentry)
- debugfs_remove(sc_dentry);
- if (nst_dentry)
- debugfs_remove(nst_dentry);
- if (o2net_dentry)
- debugfs_remove(o2net_dentry);
+ debugfs_remove(stats_dentry);
+ debugfs_remove(sc_dentry);
+ debugfs_remove(nst_dentry);
+ debugfs_remove(o2net_dentry);
}
#endif /* CONFIG_DEBUG_FS */
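
Note that the NULL checks around debugfs_remove() could be dropped because debugfs_remove() is a no-op on a NULL dentry; both the init error path and o2net_debugfs_exit() can therefore tear everything down unconditionally, in reverse order of creation.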
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9aa426e4212..3b11cb1e38f 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -153,63 +153,114 @@ static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
nst->st_node = node;
}
-static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
{
- do_gettimeofday(&nst->st_sock_time);
+ nst->st_sock_time = ktime_get();
}
-static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
{
- do_gettimeofday(&nst->st_send_time);
+ nst->st_send_time = ktime_get();
}
-static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
{
- do_gettimeofday(&nst->st_status_time);
+ nst->st_status_time = ktime_get();
}
-static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
- struct o2net_sock_container *sc)
+static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc)
{
nst->st_sc = sc;
}
-static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
+static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
+ u32 msg_id)
{
nst->st_id = msg_id;
}
-#else /* CONFIG_DEBUG_FS */
-
-static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
- u32 msgkey, struct task_struct *task, u8 node)
+static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
{
+ sc->sc_tv_timer = ktime_get();
}
-static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
{
+ sc->sc_tv_data_ready = ktime_get();
}
-static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
{
+ sc->sc_tv_advance_start = ktime_get();
}
-static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
{
+ sc->sc_tv_advance_stop = ktime_get();
}
-static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
- struct o2net_sock_container *sc)
+static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
{
+ sc->sc_tv_func_start = ktime_get();
}
-static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
- u32 msg_id)
+static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
{
+ sc->sc_tv_func_stop = ktime_get();
}
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+ return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+#else /* CONFIG_DEBUG_FS */
+# define o2net_init_nst(a, b, c, d, e)
+# define o2net_set_nst_sock_time(a)
+# define o2net_set_nst_send_time(a)
+# define o2net_set_nst_status_time(a)
+# define o2net_set_nst_sock_container(a, b)
+# define o2net_set_nst_msg_id(a, b)
+# define o2net_set_sock_timer(a)
+# define o2net_set_data_ready_time(a)
+# define o2net_set_advance_start_time(a)
+# define o2net_set_advance_stop_time(a)
+# define o2net_set_func_start_time(a)
+# define o2net_set_func_stop_time(a)
+# define o2net_get_func_run_time(a) (ktime_t)0
#endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_OCFS2_FS_STATS
+static void o2net_update_send_stats(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc)
+{
+ sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+ ktime_sub(ktime_get(),
+ nst->st_status_time));
+ sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+ ktime_sub(nst->st_status_time,
+ nst->st_send_time));
+ sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+ ktime_sub(nst->st_send_time,
+ nst->st_sock_time));
+ sc->sc_send_count++;
+}
+
+static void o2net_update_recv_stats(struct o2net_sock_container *sc)
+{
+ sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+ o2net_get_func_run_time(sc));
+ sc->sc_recv_count++;
+}
+
+#else
+
+# define o2net_update_send_stats(a, b)
+
+# define o2net_update_recv_stats(sc)
+
+#endif /* CONFIG_OCFS2_FS_STATS */
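
To make the accumulation above concrete, the three send-side intervals partition the lifetime of a single message send (names as in this patch):

    /*
     *  st_sock_time     st_send_time     st_status_time      ktime_get()
     *       |--- acquiry ----|----- send -----|--- status wait ---|
     *
     * Each interval is folded into the matching sc_tv_*_total with
     * ktime_add(total, ktime_sub(end, start)); the receive side adds
     * sc_tv_func_stop - sc_tv_func_start to sc_tv_process_total.
     */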
+
static inline int o2net_reconnect_delay(void)
{
return o2nm_single_cluster->cl_reconnect_delay_ms;
@@ -355,6 +406,7 @@ static void sc_kref_release(struct kref *kref)
sc->sc_sock = NULL;
}
+ o2nm_undepend_item(&sc->sc_node->nd_item);
o2nm_node_put(sc->sc_node);
sc->sc_node = NULL;
@@ -376,6 +428,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
{
struct o2net_sock_container *sc, *ret = NULL;
struct page *page = NULL;
+ int status = 0;
page = alloc_page(GFP_NOFS);
sc = kzalloc(sizeof(*sc), GFP_NOFS);
@@ -386,6 +439,13 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
o2nm_node_get(node);
sc->sc_node = node;
+ /* pin the node item of the remote node */
+ status = o2nm_depend_item(&node->nd_item);
+ if (status) {
+ mlog_errno(status);
+ o2nm_node_put(node);
+ goto out;
+ }
INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
@@ -546,7 +606,7 @@ static void o2net_data_ready(struct sock *sk, int bytes)
if (sk->sk_user_data) {
struct o2net_sock_container *sc = sk->sk_user_data;
sclog(sc, "data_ready hit\n");
- do_gettimeofday(&sc->sc_tv_data_ready);
+ o2net_set_data_ready_time(sc);
o2net_sc_queue_work(sc, &sc->sc_rx_work);
ready = sc->sc_data_ready;
} else {
@@ -1070,6 +1130,8 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
o2net_set_nst_status_time(&nst);
wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
+ o2net_update_send_stats(&nst, sc);
+
/* Note that we avoid overwriting the callers status return
* variable if a system error was reported on the other
* side. Callers beware. */
@@ -1183,13 +1245,15 @@ static int o2net_process_message(struct o2net_sock_container *sc,
if (syserr != O2NET_ERR_NONE)
goto out_respond;
- do_gettimeofday(&sc->sc_tv_func_start);
+ o2net_set_func_start_time(sc);
sc->sc_msg_key = be32_to_cpu(hdr->key);
sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
be16_to_cpu(hdr->data_len),
nmh->nh_func_data, &ret_data);
- do_gettimeofday(&sc->sc_tv_func_stop);
+ o2net_set_func_stop_time(sc);
+
+ o2net_update_recv_stats(sc);
out_respond:
/* this destroys the hdr, so don't use it after this */
@@ -1300,7 +1364,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
size_t datalen;
sclog(sc, "receiving\n");
- do_gettimeofday(&sc->sc_tv_advance_start);
+ o2net_set_advance_start_time(sc);
if (unlikely(sc->sc_handshake_ok == 0)) {
if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
@@ -1375,7 +1439,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
out:
sclog(sc, "ret = %d\n", ret);
- do_gettimeofday(&sc->sc_tv_advance_stop);
+ o2net_set_advance_stop_time(sc);
return ret;
}
@@ -1475,27 +1539,28 @@ static void o2net_idle_timer(unsigned long data)
{
struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
- struct timeval now;
- do_gettimeofday(&now);
+#ifdef CONFIG_DEBUG_FS
+ ktime_t now = ktime_get();
+#endif
printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
"seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
- mlog(ML_NOTICE, "here are some times that might help debug the "
- "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
- "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
- sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
- now.tv_sec, (long) now.tv_usec,
- sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
- sc->sc_tv_advance_start.tv_sec,
- (long) sc->sc_tv_advance_start.tv_usec,
- sc->sc_tv_advance_stop.tv_sec,
- (long) sc->sc_tv_advance_stop.tv_usec,
+
+#ifdef CONFIG_DEBUG_FS
+ mlog(ML_NOTICE, "Here are some times that might help debug the "
+ "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
+ "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
+ (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
+ (long long)ktime_to_us(sc->sc_tv_data_ready),
+ (long long)ktime_to_us(sc->sc_tv_advance_start),
+ (long long)ktime_to_us(sc->sc_tv_advance_stop),
sc->sc_msg_key, sc->sc_msg_type,
- sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
- sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
+ (long long)ktime_to_us(sc->sc_tv_func_start),
+ (long long)ktime_to_us(sc->sc_tv_func_stop));
+#endif
/*
* Initialize the nn_timeout so that the next connection attempt
@@ -1511,7 +1576,7 @@ static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
msecs_to_jiffies(o2net_keepalive_delay()));
- do_gettimeofday(&sc->sc_tv_timer);
+ o2net_set_sock_timer(sc);
mod_timer(&sc->sc_idle_timeout,
jiffies + msecs_to_jiffies(o2net_idle_timeout()));
}
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 15fdbdf9eb4..4cbcb65784a 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -166,18 +166,27 @@ struct o2net_sock_container {
/* original handlers for the sockets */
void (*sc_state_change)(struct sock *sk);
void (*sc_data_ready)(struct sock *sk, int bytes);
-#ifdef CONFIG_DEBUG_FS
- struct list_head sc_net_debug_item;
-#endif
- struct timeval sc_tv_timer;
- struct timeval sc_tv_data_ready;
- struct timeval sc_tv_advance_start;
- struct timeval sc_tv_advance_stop;
- struct timeval sc_tv_func_start;
- struct timeval sc_tv_func_stop;
+
u32 sc_msg_key;
u16 sc_msg_type;
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sc_net_debug_item;
+ ktime_t sc_tv_timer;
+ ktime_t sc_tv_data_ready;
+ ktime_t sc_tv_advance_start;
+ ktime_t sc_tv_advance_stop;
+ ktime_t sc_tv_func_start;
+ ktime_t sc_tv_func_stop;
+#endif
+#ifdef CONFIG_OCFS2_FS_STATS
+ ktime_t sc_tv_acquiry_total;
+ ktime_t sc_tv_send_total;
+ ktime_t sc_tv_status_total;
+ u32 sc_send_count;
+ u32 sc_recv_count;
+ ktime_t sc_tv_process_total;
+#endif
struct mutex sc_send_lock;
};
@@ -220,9 +229,9 @@ struct o2net_send_tracking {
u32 st_msg_type;
u32 st_msg_key;
u8 st_node;
- struct timeval st_sock_time;
- struct timeval st_send_time;
- struct timeval st_status_time;
+ ktime_t st_sock_time;
+ ktime_t st_send_time;
+ ktime_t st_status_time;
};
#else
struct o2net_send_tracking {
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f4499915683..3a3ed4bb794 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
- mlog_entry_void();
+ struct dlm_lock_resource *res;
BUG_ON(!dlm);
BUG_ON(!lock);
+ res = lock->lockres;
+
assert_spin_locked(&dlm->ast_lock);
+
if (!list_empty(&lock->ast_list)) {
- mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n",
+ mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+ "AST list not empty, pending %d, newlevel %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ast_pending, lock->ml.type);
BUG();
}
if (lock->ast_pending)
- mlog(0, "lock has an ast getting flushed right now\n");
+ mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
/* putting lock on list, add a ref */
dlm_lock_get(lock);
@@ -110,9 +120,10 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
/* check to see if this ast obsoletes the bast */
if (dlm_should_cancel_bast(dlm, lock)) {
- struct dlm_lock_resource *res = lock->lockres;
- mlog(0, "%s: cancelling bast for %.*s\n",
- dlm->name, res->lockname.len, res->lockname.name);
+ mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lock->bast_pending = 0;
list_del_init(&lock->bast_list);
lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
- mlog_entry_void();
-
BUG_ON(!dlm);
BUG_ON(!lock);
@@ -147,15 +156,21 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
- mlog_entry_void();
+ struct dlm_lock_resource *res;
BUG_ON(!dlm);
BUG_ON(!lock);
+
assert_spin_locked(&dlm->ast_lock);
+ res = lock->lockres;
+
BUG_ON(!list_empty(&lock->bast_list));
if (lock->bast_pending)
- mlog(0, "lock has a bast getting flushed right now\n");
+ mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
/* putting lock on list, add a ref */
dlm_lock_get(lock);
@@ -167,8 +182,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
- mlog_entry_void();
-
BUG_ON(!dlm);
BUG_ON(!lock);
@@ -213,7 +226,10 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
dlm_astlockfunc_t *fn;
struct dlm_lockstatus *lksb;
- mlog_entry_void();
+ mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lksb = lock->lksb;
fn = lock->ast;
@@ -231,7 +247,10 @@ int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lockstatus *lksb;
int lksbflags;
- mlog_entry_void();
+ mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lksb = lock->lksb;
BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
{
dlm_bastlockfunc_t *fn = lock->bast;
- mlog_entry_void();
BUG_ON(lock->ml.node != dlm->node_num);
+ mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ blocked_type);
+
(*fn)(lock->astdata, blocked_type);
}
@@ -332,7 +356,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
/* cannot get a proxy ast message if this node owns it */
BUG_ON(res->owner == dlm->node_num);
- mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@ do_ast:
if (past->type == DLM_AST) {
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, &res->granted);
- mlog(0, "ast: Adding to granted list... type=%d, "
- "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+ mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+ lock->ml.type, lock->ml.convert_type);
+
if (lock->ml.convert_type != LKM_IVMODE) {
lock->ml.type = lock->ml.convert_type;
lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
size_t veclen = 1;
int status;
- mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
- res->lockname.len, res->lockname.name, lock->ml.node,
- msg_type, blocked_type);
+ mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+ res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+ blocked_type);
memset(&past, 0, sizeof(struct dlm_proxy_ast));
past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
vec[0].iov_len = sizeof(struct dlm_proxy_ast);
vec[0].iov_base = &past;
if (flags & DLM_LKSB_GET_LVB) {
- mlog(0, "returning requested LVB data\n");
be32_add_cpu(&past.flags, LKM_GET_LVB);
vec[1].iov_len = DLM_LVB_LEN;
vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
lock->ml.node, &status);
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+ mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+ dlm->name, res->lockname.len, res->lockname.name, ret,
lock->ml.node);
else {
if (status == DLM_RECOVERING) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index b36d0bf77a5..4bdf7baee34 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -50,10 +50,10 @@
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
enum dlm_mle_type {
- DLM_MLE_BLOCK,
- DLM_MLE_MASTER,
- DLM_MLE_MIGRATION,
- DLM_MLE_NUM_TYPES
+ DLM_MLE_BLOCK = 0,
+ DLM_MLE_MASTER = 1,
+ DLM_MLE_MIGRATION = 2,
+ DLM_MLE_NUM_TYPES = 3,
};
struct dlm_master_list_entry {
@@ -82,8 +82,8 @@ struct dlm_master_list_entry {
enum dlm_ast_type {
DLM_AST = 0,
- DLM_BAST,
- DLM_ASTUNLOCK
+ DLM_BAST = 1,
+ DLM_ASTUNLOCK = 2,
};
@@ -119,9 +119,9 @@ struct dlm_recovery_ctxt
enum dlm_ctxt_state {
DLM_CTXT_NEW = 0,
- DLM_CTXT_JOINED,
- DLM_CTXT_IN_SHUTDOWN,
- DLM_CTXT_LEAVING,
+ DLM_CTXT_JOINED = 1,
+ DLM_CTXT_IN_SHUTDOWN = 2,
+ DLM_CTXT_LEAVING = 3,
};
struct dlm_ctxt
@@ -388,8 +388,8 @@ struct dlm_lock
enum dlm_lockres_list {
DLM_GRANTED_LIST = 0,
- DLM_CONVERTING_LIST,
- DLM_BLOCKED_LIST
+ DLM_CONVERTING_LIST = 1,
+ DLM_BLOCKED_LIST = 2,
};
static inline int dlm_lvb_is_empty(char *lvb)
@@ -427,27 +427,27 @@ struct dlm_node_iter
enum {
- DLM_MASTER_REQUEST_MSG = 500,
- DLM_UNUSED_MSG1, /* 501 */
- DLM_ASSERT_MASTER_MSG, /* 502 */
- DLM_CREATE_LOCK_MSG, /* 503 */
- DLM_CONVERT_LOCK_MSG, /* 504 */
- DLM_PROXY_AST_MSG, /* 505 */
- DLM_UNLOCK_LOCK_MSG, /* 506 */
- DLM_DEREF_LOCKRES_MSG, /* 507 */
- DLM_MIGRATE_REQUEST_MSG, /* 508 */
- DLM_MIG_LOCKRES_MSG, /* 509 */
- DLM_QUERY_JOIN_MSG, /* 510 */
- DLM_ASSERT_JOINED_MSG, /* 511 */
- DLM_CANCEL_JOIN_MSG, /* 512 */
- DLM_EXIT_DOMAIN_MSG, /* 513 */
- DLM_MASTER_REQUERY_MSG, /* 514 */
- DLM_LOCK_REQUEST_MSG, /* 515 */
- DLM_RECO_DATA_DONE_MSG, /* 516 */
- DLM_BEGIN_RECO_MSG, /* 517 */
- DLM_FINALIZE_RECO_MSG, /* 518 */
- DLM_QUERY_REGION, /* 519 */
- DLM_QUERY_NODEINFO, /* 520 */
+ DLM_MASTER_REQUEST_MSG = 500,
+ DLM_UNUSED_MSG1 = 501,
+ DLM_ASSERT_MASTER_MSG = 502,
+ DLM_CREATE_LOCK_MSG = 503,
+ DLM_CONVERT_LOCK_MSG = 504,
+ DLM_PROXY_AST_MSG = 505,
+ DLM_UNLOCK_LOCK_MSG = 506,
+ DLM_DEREF_LOCKRES_MSG = 507,
+ DLM_MIGRATE_REQUEST_MSG = 508,
+ DLM_MIG_LOCKRES_MSG = 509,
+ DLM_QUERY_JOIN_MSG = 510,
+ DLM_ASSERT_JOINED_MSG = 511,
+ DLM_CANCEL_JOIN_MSG = 512,
+ DLM_EXIT_DOMAIN_MSG = 513,
+ DLM_MASTER_REQUERY_MSG = 514,
+ DLM_LOCK_REQUEST_MSG = 515,
+ DLM_RECO_DATA_DONE_MSG = 516,
+ DLM_BEGIN_RECO_MSG = 517,
+ DLM_FINALIZE_RECO_MSG = 518,
+ DLM_QUERY_REGION = 519,
+ DLM_QUERY_NODEINFO = 520,
};
struct dlm_reco_node_data
@@ -460,19 +460,19 @@ struct dlm_reco_node_data
enum {
DLM_RECO_NODE_DATA_DEAD = -1,
DLM_RECO_NODE_DATA_INIT = 0,
- DLM_RECO_NODE_DATA_REQUESTING,
- DLM_RECO_NODE_DATA_REQUESTED,
- DLM_RECO_NODE_DATA_RECEIVING,
- DLM_RECO_NODE_DATA_DONE,
- DLM_RECO_NODE_DATA_FINALIZE_SENT,
+ DLM_RECO_NODE_DATA_REQUESTING = 1,
+ DLM_RECO_NODE_DATA_REQUESTED = 2,
+ DLM_RECO_NODE_DATA_RECEIVING = 3,
+ DLM_RECO_NODE_DATA_DONE = 4,
+ DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};
enum {
DLM_MASTER_RESP_NO = 0,
- DLM_MASTER_RESP_YES,
- DLM_MASTER_RESP_MAYBE,
- DLM_MASTER_RESP_ERROR
+ DLM_MASTER_RESP_YES = 1,
+ DLM_MASTER_RESP_MAYBE = 2,
+ DLM_MASTER_RESP_ERROR = 3,
};
@@ -649,9 +649,9 @@ struct dlm_proxy_ast
#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response_code {
JOIN_DISALLOW = 0,
- JOIN_OK,
- JOIN_OK_NO_MAP,
- JOIN_PROTOCOL_MISMATCH,
+ JOIN_OK = 1,
+ JOIN_OK_NO_MAP = 2,
+ JOIN_PROTOCOL_MISMATCH = 3,
};
struct dlm_query_join_packet {
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 272ec8631a5..04a32be0aeb 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -370,92 +370,46 @@ static void dlm_debug_get(struct dlm_debug_ctxt *dc)
kref_get(&dc->debug_refcnt);
}
-static struct debug_buffer *debug_buffer_allocate(void)
+static int debug_release(struct inode *inode, struct file *file)
{
- struct debug_buffer *db = NULL;
-
- db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
- if (!db)
- goto bail;
-
- db->len = PAGE_SIZE;
- db->buf = kmalloc(db->len, GFP_KERNEL);
- if (!db->buf)
- goto bail;
-
- return db;
-bail:
- kfree(db);
- return NULL;
-}
-
-static ssize_t debug_buffer_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- struct debug_buffer *db = file->private_data;
-
- return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
-}
-
-static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
-{
- struct debug_buffer *db = file->private_data;
- loff_t new = -1;
-
- switch (whence) {
- case 0:
- new = off;
- break;
- case 1:
- new = file->f_pos + off;
- break;
- }
-
- if (new < 0 || new > db->len)
- return -EINVAL;
-
- return (file->f_pos = new);
+ free_page((unsigned long)file->private_data);
+ return 0;
}
-static int debug_buffer_release(struct inode *inode, struct file *file)
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- struct debug_buffer *db = file->private_data;
-
- if (db)
- kfree(db->buf);
- kfree(db);
-
- return 0;
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
}
/* end - util funcs */
/* begin - purge list funcs */
-static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
{
struct dlm_lock_resource *res;
int out = 0;
unsigned long total = 0;
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Dumping Purgelist for Domain: %s\n", dlm->name);
spin_lock(&dlm->spinlock);
list_for_each_entry(res, &dlm->purge_list, purge) {
++total;
- if (db->len - out < 100)
+ if (len - out < 100)
continue;
spin_lock(&res->spinlock);
out += stringify_lockname(res->lockname.name,
res->lockname.len,
- db->buf + out, db->len - out);
- out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+ buf + out, len - out);
+ out += snprintf(buf + out, len - out, "\t%ld\n",
(jiffies - res->last_used)/HZ);
spin_unlock(&res->spinlock);
}
spin_unlock(&dlm->spinlock);
- out += snprintf(db->buf + out, db->len - out,
- "Total on list: %ld\n", total);
+ out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
return out;
}
@@ -463,15 +417,15 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
static int debug_purgelist_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
- struct debug_buffer *db;
+ char *buf = NULL;
- db = debug_buffer_allocate();
- if (!db)
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
goto bail;
- db->len = debug_purgelist_print(dlm, db);
+ i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
- file->private_data = db;
+ file->private_data = buf;
return 0;
bail:
@@ -480,14 +434,14 @@ bail:
static const struct file_operations debug_purgelist_fops = {
.open = debug_purgelist_open,
- .release = debug_buffer_release,
- .read = debug_buffer_read,
- .llseek = debug_buffer_llseek,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
};
/* end - purge list funcs */
/* begin - debug mle funcs */
-static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
{
struct dlm_master_list_entry *mle;
struct hlist_head *bucket;
@@ -495,7 +449,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
int i, out = 0;
unsigned long total = 0, longest = 0, bucket_count = 0;
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Dumping MLEs for Domain: %s\n", dlm->name);
spin_lock(&dlm->master_lock);
@@ -506,16 +460,16 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
master_hash_node);
++total;
++bucket_count;
- if (db->len - out < 200)
+ if (len - out < 200)
continue;
- out += dump_mle(mle, db->buf + out, db->len - out);
+ out += dump_mle(mle, buf + out, len - out);
}
longest = max(longest, bucket_count);
bucket_count = 0;
}
spin_unlock(&dlm->master_lock);
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Total: %ld, Longest: %ld\n", total, longest);
return out;
}
@@ -523,15 +477,15 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
static int debug_mle_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
- struct debug_buffer *db;
+ char *buf = NULL;
- db = debug_buffer_allocate();
- if (!db)
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
goto bail;
- db->len = debug_mle_print(dlm, db);
+ i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
- file->private_data = db;
+ file->private_data = buf;
return 0;
bail:
@@ -540,9 +494,9 @@ bail:
static const struct file_operations debug_mle_fops = {
.open = debug_mle_open,
- .release = debug_buffer_release,
- .read = debug_buffer_read,
- .llseek = debug_buffer_llseek,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
};
/* end - debug mle funcs */
@@ -757,7 +711,7 @@ static const struct file_operations debug_lockres_fops = {
/* end - debug lockres funcs */
/* begin - debug state funcs */
-static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
{
int out = 0;
struct dlm_reco_node_data *node;
@@ -781,35 +735,35 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
}
/* Domain: xxxxxxxxxx Key: 0xdfbac769 */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Domain: %s Key: 0x%08x Protocol: %d.%d\n",
dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
/* Thread Pid: xxx Node: xxx State: xxxxx */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Thread Pid: %d Node: %d State: %s\n",
- dlm->dlm_thread_task->pid, dlm->node_num, state);
+ task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
/* Number of Joins: xxx Joining Node: xxx */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Number of Joins: %d Joining Node: %d\n",
dlm->num_joins, dlm->joining_node);
/* Domain Map: xx xx xx */
- out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+ out += snprintf(buf + out, len - out, "Domain Map: ");
out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
- db->buf + out, db->len - out);
- out += snprintf(db->buf + out, db->len - out, "\n");
+ buf + out, len - out);
+ out += snprintf(buf + out, len - out, "\n");
/* Live Map: xx xx xx */
- out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+ out += snprintf(buf + out, len - out, "Live Map: ");
out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
- db->buf + out, db->len - out);
- out += snprintf(db->buf + out, db->len - out, "\n");
+ buf + out, len - out);
+ out += snprintf(buf + out, len - out, "\n");
/* Lock Resources: xxx (xxx) */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Lock Resources: %d (%d)\n",
atomic_read(&dlm->res_cur_count),
atomic_read(&dlm->res_tot_count));
@@ -821,29 +775,29 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
cur_mles += atomic_read(&dlm->mle_cur_count[i]);
/* MLEs: xxx (xxx) */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"MLEs: %d (%d)\n", cur_mles, tot_mles);
/* Blocking: xxx (xxx) */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
" Blocking: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
/* Mastery: xxx (xxx) */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
" Mastery: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
/* Migration: xxx (xxx) */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
" Migration: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
/* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Lists: Dirty=%s Purge=%s PendingASTs=%s "
"PendingBASTs=%s\n",
(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -852,12 +806,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
/* Purge Count: xxx Refs: xxx */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Purge Count: %d Refs: %d\n", dlm->purge_count,
atomic_read(&dlm->dlm_refs.refcount));
/* Dead Node: xxx */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Dead Node: %d\n", dlm->reco.dead_node);
/* What about DLM_RECO_STATE_FINALIZE? */
@@ -867,19 +821,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
state = "INACTIVE";
/* Recovery Pid: xxxx Master: xxx State: xxxx */
- out += snprintf(db->buf + out, db->len - out,
+ out += snprintf(buf + out, len - out,
"Recovery Pid: %d Master: %d State: %s\n",
- dlm->dlm_reco_thread_task->pid,
+ task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.new_master, state);
/* Recovery Map: xx xx */
- out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+ out += snprintf(buf + out, len - out, "Recovery Map: ");
out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
- db->buf + out, db->len - out);
- out += snprintf(db->buf + out, db->len - out, "\n");
+ buf + out, len - out);
+ out += snprintf(buf + out, len - out, "\n");
/* Recovery Node State: */
- out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+ out += snprintf(buf + out, len - out, "Recovery Node State:\n");
list_for_each_entry(node, &dlm->reco.node_data, list) {
switch (node->state) {
case DLM_RECO_NODE_DATA_INIT:
@@ -907,7 +861,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
state = "BAD";
break;
}
- out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+ out += snprintf(buf + out, len - out, "\t%u - %s\n",
node->node_num, state);
}
@@ -919,15 +873,15 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
static int debug_state_open(struct inode *inode, struct file *file)
{
struct dlm_ctxt *dlm = inode->i_private;
- struct debug_buffer *db = NULL;
+ char *buf = NULL;
- db = debug_buffer_allocate();
- if (!db)
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
goto bail;
- db->len = debug_state_print(dlm, db);
+ i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
- file->private_data = db;
+ file->private_data = buf;
return 0;
bail:
@@ -936,9 +890,9 @@ bail:
static const struct file_operations debug_state_fops = {
.open = debug_state_open,
- .release = debug_buffer_release,
- .read = debug_buffer_read,
- .llseek = debug_buffer_llseek,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
};
/* end - debug state funcs */
@@ -1002,14 +956,10 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm)
struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
if (dc) {
- if (dc->debug_purgelist_dentry)
- debugfs_remove(dc->debug_purgelist_dentry);
- if (dc->debug_mle_dentry)
- debugfs_remove(dc->debug_mle_dentry);
- if (dc->debug_lockres_dentry)
- debugfs_remove(dc->debug_lockres_dentry);
- if (dc->debug_state_dentry)
- debugfs_remove(dc->debug_state_dentry);
+ debugfs_remove(dc->debug_purgelist_dentry);
+ debugfs_remove(dc->debug_mle_dentry);
+ debugfs_remove(dc->debug_lockres_dentry);
+ debugfs_remove(dc->debug_state_dentry);
dlm_debug_put(dc);
}
}
@@ -1040,8 +990,7 @@ bail:
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
- if (dlm->dlm_debugfs_subroot)
- debugfs_remove(dlm->dlm_debugfs_subroot);
+ debugfs_remove(dlm->dlm_debugfs_subroot);
}
/* debugfs root */
@@ -1057,7 +1006,6 @@ int dlm_create_debugfs_root(void)
void dlm_destroy_debugfs_root(void)
{
- if (dlm_debugfs_root)
- debugfs_remove(dlm_debugfs_root);
+ debugfs_remove(dlm_debugfs_root);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 8c686d22f9c..1f27c4812d1 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -37,11 +37,6 @@ struct dlm_debug_ctxt {
struct dentry *debug_purgelist_dentry;
};
-struct debug_buffer {
- int len;
- char *buf;
-};
-
struct debug_lockres {
int dl_len;
char *dl_buf;
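The dlmdebug.c rework above replaces the bespoke debug_buffer allocator, reader, and hand-rolled llseek with a stock debugfs pattern: open() renders the report into a single zeroed page, publishes its length through i_size_write() so generic_file_llseek() can bound seeks, read() is simple_read_from_buffer(), and release() frees the page. A condensed, hedged sketch of that pattern; the print helper "fill" is a stand-in for debug_state_print() and friends, not a real symbol:

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static int fill(void *priv, char *buf, int len)	/* hypothetical */
{
	return scnprintf(buf, len, "stub report\n");
}

static int example_open(struct inode *inode, struct file *file)
{
	char *buf = (char *)get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;
	/* Render at most PAGE_SIZE - 1 bytes; record the length so read
	 * and llseek know the file's extent. */
	i_size_write(inode, fill(inode->i_private, buf, PAGE_SIZE - 1));
	file->private_data = buf;
	return 0;
}

static ssize_t example_read(struct file *file, char __user *ubuf,
			    size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, nbytes, ppos,
				       file->private_data,
				       i_size_read(file->f_mapping->host));
}

static int example_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= example_read,
	.release	= example_release,
	.llseek		= generic_file_llseek,
};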
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa96cfe..7e38a072d72 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@ redo_bucket:
}
cond_resched_lock(&dlm->spinlock);
num += n;
- mlog(0, "%s: touched %d lockreses in bucket %d "
- "(tot=%d)\n", dlm->name, n, i, num);
}
spin_unlock(&dlm->spinlock);
wake_up(&dlm->dlm_thread_wq);
@@ -1661,8 +1659,8 @@ bail:
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
{
- o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
- o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
+ o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+ o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
}
@@ -1674,13 +1672,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
- status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
+ status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
if (status)
goto bail;
o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
- status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
+ status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
if (status)
goto bail;
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 69cf369961c..7009292aac5 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -106,6 +106,9 @@ static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
+ if (!dlm_lock_compatible(tmplock->ml.convert_type,
+ lock->ml.type))
+ return 0;
}
return 1;
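The one-hunk dlmlock.c fix above closes a hole in dlm_can_grant_new_lock(): a queued lock that is waiting to convert must also be checked against the mode it is converting to, not only the mode it currently holds, or a new lock could be granted now and conflict the moment the conversion completes. A hedged userspace sketch of the classic DLM mode-compatibility matrix this relies on (illustrative values in the usual NL/CR/CW/PR/PW/EX ordering, not copied from o2dlm):

#include <stdbool.h>

enum mode { NL, CR, CW, PR, PW, EX, NMODES };

static const bool compat[NMODES][NMODES] = {
	/*          NL     CR     CW     PR     PW     EX  */
	/* NL */ { true,  true,  true,  true,  true,  true  },
	/* CR */ { true,  true,  true,  true,  true,  false },
	/* CW */ { true,  true,  true,  false, false, false },
	/* PR */ { true,  true,  false, true,  false, false },
	/* PW */ { true,  true,  false, false, false, false },
	/* EX */ { true,  false, false, false, false, false },
};

static bool can_grant(enum mode held, enum mode converting_to, enum mode req)
{
	/* The request must clear both the held mode and the pending
	 * target mode, which is what the added hunk enforces. */
	return compat[held][req] && compat[converting_to][req];
}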
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf33d9..1d6d1d22c47 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res)){
if (list_empty(&res->purge)) {
- mlog(0, "putting lockres %.*s:%p onto purge list\n",
- res->lockname.len, res->lockname.name, res);
+ mlog(0, "%s: Adding res %.*s to purge list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
res->last_used = jiffies;
dlm_lockres_get(res);
@@ -138,8 +136,8 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
dlm->purge_count++;
}
} else if (!list_empty(&res->purge)) {
- mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
- res->lockname.len, res->lockname.name, res, res->owner);
+ mlog(0, "%s: Removing res %.*s from purge list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->purge);
dlm_lockres_put(res);
@@ -150,7 +148,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -171,9 +168,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
master = (res->owner == dlm->node_num);
-
- mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
- res->lockname.name, master);
+ mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+ res->lockname.len, res->lockname.name, master);
if (!master) {
res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
- mlog_errno(ret);
+ mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+ res->lockname.len, res->lockname.name, ret);
if (!dlm_is_host_down(ret))
BUG();
}
- mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
- dlm->name, res->lockname.len, res->lockname.name, ret);
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
}
if (!list_empty(&res->purge)) {
- mlog(0, "removing lockres %.*s:%p from purgelist, "
- "master = %d\n", res->lockname.len, res->lockname.name,
- res, master);
+ mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+ dlm->name, res->lockname.len, res->lockname.name, master);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
if (!__dlm_lockres_unused(res)) {
- mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+ mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
dlm->name, res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
BUG();
@@ -266,10 +260,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
unused = __dlm_lockres_unused(lockres);
if (!unused ||
(lockres->state & DLM_LOCK_RES_MIGRATING)) {
- mlog(0, "lockres %s:%.*s: is in use or "
- "being remastered, used %d, state %d\n",
- dlm->name, lockres->lockname.len,
- lockres->lockname.name, !unused, lockres->state);
+ mlog(0, "%s: res %.*s is in use or being remastered, "
+ "used %d, state %d\n", dlm->name,
+ lockres->lockname.len, lockres->lockname.name,
+ !unused, lockres->state);
list_move_tail(&dlm->purge_list, &lockres->purge);
spin_unlock(&lockres->spinlock);
continue;
@@ -296,15 +290,12 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
struct list_head *head;
int can_grant = 1;
- //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
- //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
- //mlog(0, "shuffle res %.*s\n", res->lockname.len,
- // res->lockname.name);
-
- /* because this function is called with the lockres
+ /*
+ * Because this function is called with the lockres
* spinlock, and because we know that it is not migrating/
* recovering/in-progress, it is fine to reserve asts and
- * basts right before queueing them all throughout */
+ * basts right before queueing them all throughout
+ */
assert_spin_locked(&dlm->ast_lock);
assert_spin_locked(&res->spinlock);
BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
converting:
if (list_empty(&res->converting))
goto blocked;
- mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
- res->lockname.name);
+ mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+ res->lockname.len, res->lockname.name);
target = list_entry(res->converting.next, struct dlm_lock, list);
if (target->ml.convert_type == LKM_IVMODE) {
- mlog(ML_ERROR, "%.*s: converting a lock with no "
- "convert_type!\n", res->lockname.len, res->lockname.name);
+ mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+ dlm->name, res->lockname.len, res->lockname.name);
BUG();
}
head = &res->granted;
@@ -365,9 +356,12 @@ converting:
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
- mlog(0, "calling ast for converting lock: %.*s, have: %d, "
- "granting: %d, node: %u\n", res->lockname.len,
- res->lockname.name, target->ml.type,
+ mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+ "%d => %d, node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+ target->ml.type,
target->ml.convert_type, target->ml.node);
target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@ blocked:
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
- mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
- "node: %u\n", res->lockname.len, res->lockname.name,
+ mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+ "node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
target->ml.type, target->ml.node);
- // target->ml.type is already correct
+ /* target->ml.type is already correct */
list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@ leave:
/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
- mlog_entry("dlm=%p, res=%p\n", dlm, res);
if (res) {
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@ void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
- mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
@@ -484,13 +478,16 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
res->state |= DLM_LOCK_RES_DIRTY;
}
}
+
+ mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
}
/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
- mlog(0, "starting dlm thread...\n");
+ mlog(0, "Starting dlm_thread...\n");
dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@ int dlm_launch_thread(struct dlm_ctxt *dlm)
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
if (dlm->dlm_thread_task) {
- mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+ mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
kthread_stop(dlm->dlm_thread_task);
dlm->dlm_thread_task = NULL;
}
@@ -536,7 +533,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
- mlog(0, "delivering an ast for this lockres\n");
+ mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+ "node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.type, lock->ml.node);
BUG_ON(!lock->ast_pending);
@@ -557,9 +559,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
/* possible that another ast was queued while
* we were delivering the last one */
if (!list_empty(&lock->ast_list)) {
- mlog(0, "aha another ast got queued while "
- "we were finishing the last one. will "
- "keep the ast_pending flag set.\n");
+ mlog(0, "%s: res %.*s, AST queued while flushing last "
+ "one\n", dlm->name, res->lockname.len,
+ res->lockname.name);
} else
lock->ast_pending = 0;
@@ -590,8 +592,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
- mlog(0, "delivering a bast for this lockres "
- "(blocked = %d\n", hi);
+ mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+ "blocked %d, node %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ hi, lock->ml.node);
if (lock->ml.node != dlm->node_num) {
ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
/* possible that another bast was queued while
* we were delivering the last one */
if (!list_empty(&lock->bast_list)) {
- mlog(0, "aha another bast got queued while "
- "we were finishing the last one. will "
- "keep the bast_pending flag set.\n");
+ mlog(0, "%s: res %.*s, BAST queued while flushing last "
+ "one\n", dlm->name, res->lockname.len,
+ res->lockname.name);
} else
lock->bast_pending = 0;
@@ -675,11 +681,12 @@ static int dlm_thread(void *data)
spin_lock(&res->spinlock);
if (res->owner != dlm->node_num) {
__dlm_print_one_lock_resource(res);
- mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
- res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
- res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
- res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
- res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+ mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+ " dirty %d\n", dlm->name,
+ !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+ !!(res->state & DLM_LOCK_RES_MIGRATING),
+ !!(res->state & DLM_LOCK_RES_RECOVERING),
+ !!(res->state & DLM_LOCK_RES_DIRTY));
}
BUG_ON(res->owner != dlm->node_num);
@@ -693,8 +700,8 @@ static int dlm_thread(void *data)
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->ast_lock);
- mlog(0, "delaying list shuffling for in-"
- "progress lockres %.*s, state=%d\n",
+ mlog(0, "%s: res %.*s, inprogress, delay list "
+ "shuffle, state %d\n", dlm->name,
res->lockname.len, res->lockname.name,
res->state);
delay = 1;
@@ -706,10 +713,6 @@ static int dlm_thread(void *data)
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
- mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
- "res=%.*s\n", dlm->name,
- res->lockname.len, res->lockname.name);
-
/* called while holding lockres lock */
dlm_shuffle_lists(dlm, res);
res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@ in_progress:
/* unlikely, but we may need to give time to
* other tasks */
if (!--n) {
- mlog(0, "throttling dlm_thread\n");
+ mlog(0, "%s: Throttling dlm thread\n",
+ dlm->name);
break;
}
}
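Throughout the dlmthread.c message cleanup above, the reworked mlogs print each lock as "%u:%llu" -- the 64-bit lock cookie split into its owning node and a per-node sequence number -- so one lock can be correlated across the logs of several nodes. A hedged sketch of that decomposition; the authoritative helpers are dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq() in dlmcommon.h, and the exact bit split below is an assumption:

#include <stdint.h>

static inline unsigned int cookie_node(uint64_t cookie)
{
	return (unsigned int)(cookie >> 56);	/* node in the top byte */
}

static inline unsigned long long cookie_seq(uint64_t cookie)
{
	return cookie & ((1ULL << 56) - 1);	/* per-node sequence below */
}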
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d14cad6e2e4..30c52314445 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1017,8 +1017,11 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
* An error return must mean that no cluster locks
* were held on function exit.
*/
- if (oi1->ip_blkno != oi2->ip_blkno)
+ if (oi1->ip_blkno != oi2->ip_blkno) {
ocfs2_inode_unlock(inode2, 1);
+ brelse(*bh2);
+ *bh2 = NULL;
+ }
if (status != -ENOENT)
mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 70dd3b1798f..51cd6898e7f 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -420,6 +420,11 @@ struct ocfs2_super
struct inode *osb_tl_inode;
struct buffer_head *osb_tl_bh;
struct delayed_work osb_truncate_log_wq;
+ /*
+ * How many clusters in our truncate log.
+ * It must be protected by osb_tl_inode->i_mutex.
+ */
+ unsigned int truncated_clusters;
struct ocfs2_node_map osb_recovering_orphan_dirs;
unsigned int *osb_orphan_wipes;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0fed41e6efc..84becd3e477 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -133,16 +133,20 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
void __quota_error(struct super_block *sb, const char *func,
- const char *fmt, ...)
+ const char *fmt, ...)
{
- va_list args;
-
if (printk_ratelimit()) {
+ va_list args;
+ struct va_format vaf;
+
va_start(args, fmt);
- printk(KERN_ERR "Quota error (device %s): %s: ",
- sb->s_id, func);
- vprintk(fmt, args);
- printk("\n");
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+ sb->s_id, func, &vaf);
+
va_end(args);
}
}
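The __quota_error() rewrite above switches to the kernel's %pV extension: a struct va_format bundles the caller's format string and va_list so the prefix and the formatted body go out in a single printk() call, which cannot interleave with other CPUs' messages the way the old three-call sequence could. It also moves va_start/va_end inside the ratelimit check, so the argument list is only walked when something is actually printed. A minimal hedged sketch of the idiom (%pV is printk-only, not plain printf):

#include <linux/kernel.h>
#include <stdarg.h>

static void example_error(const char *id, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* One atomic line: prefix plus caller-formatted body. */
	printk(KERN_ERR "error (%s): %pV\n", id, &vaf);
	va_end(args);
}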
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874eabc..e41c1becf09 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
return -ENOMEM;
ret = read_blk(info, *blk, buf);
if (ret < 0) {
- quota_error(dquot->dq_sb, "Can't read quota data "
- "block %u", blk);
+ quota_error(dquot->dq_sb, "Can't read quota data block %u",
+ *blk);
goto out_buf;
}
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
} else {
ret = write_blk(info, *blk, buf);
if (ret < 0)
- quota_error(dquot->dq_sb, "Can't write quota "
- "tree block %u", blk);
+ quota_error(dquot->dq_sb,
+ "Can't write quota tree block %u",
+ *blk);
}
}
out_buf:
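Both quota_tree.c hunks above fix the same bug: blk is a pointer to the block number, so passing it to a %u conversion printed the pointer bits instead of the value; the fix dereferences it. A tiny hedged illustration of the bug class:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blk_val = 42, *blk = &blk_val;

	/* printf("block %u\n", blk);  -- wrong: %u handed a pointer */
	printf("block %u\n", *blk);	/* right: prints 42 */
	return 0;
}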
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index f8def3c8ea4..0e0e99bd6bc 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,6 +1,5 @@
config UDF_FS
tristate "UDF file system support"
- depends on BKL # needs serious work to remove
select CRC_ITU_T
help
This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index b608efaa4ce..306ee39ef2c 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -157,10 +157,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
udf_debug("bit %ld already set\n", bit + i);
udf_debug("byte=%2x\n",
((char *)bh->b_data)[(bit + i) >> 3]);
- } else {
- udf_add_free_space(sb, sbi->s_partition, 1);
}
}
+ udf_add_free_space(sb, sbi->s_partition, count);
mark_buffer_dirty(bh);
if (overflow) {
block += count;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 51552bf5022..eb8bfe2b89a 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
@@ -190,18 +189,14 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *dir = filp->f_path.dentry->d_inode;
int result;
- lock_kernel();
-
if (filp->f_pos == 0) {
if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
- unlock_kernel();
return 0;
}
filp->f_pos++;
}
result = do_udf_readdir(dir, filp, filldir, dirent);
- unlock_kernel();
return result;
}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 66b9e7e7e4c..89c78486cbb 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -32,7 +32,6 @@
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/aio.h>
@@ -114,6 +113,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
size_t count = iocb->ki_left;
struct udf_inode_info *iinfo = UDF_I(inode);
+ down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (file->f_flags & O_APPEND)
pos = inode->i_size;
@@ -126,6 +126,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
udf_expand_file_adinicb(inode, pos + count, &err);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
udf_debug("udf_expand_adinicb: err=%d\n", err);
+ up_write(&iinfo->i_data_sem);
return err;
}
} else {
@@ -135,6 +136,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
iinfo->i_lenAlloc = inode->i_size;
}
}
+ up_write(&iinfo->i_data_sem);
retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
if (retval > 0)
@@ -149,8 +151,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
long old_block, new_block;
int result = -EINVAL;
- lock_kernel();
-
if (file_permission(filp, MAY_READ) != 0) {
udf_debug("no permission to access inode %lu\n", inode->i_ino);
result = -EPERM;
@@ -196,7 +196,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
out:
- unlock_kernel();
return result;
}
@@ -204,10 +203,10 @@ static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE) {
mutex_lock(&inode->i_mutex);
- lock_kernel();
+ down_write(&UDF_I(inode)->i_data_sem);
udf_discard_prealloc(inode);
udf_truncate_tail_extent(inode);
- unlock_kernel();
+ up_write(&UDF_I(inode)->i_data_sem);
mutex_unlock(&inode->i_mutex);
}
return 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 75d9304d0dc..6fb7e0adcda 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -92,28 +92,19 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
return NULL;
}
- mutex_lock(&sbi->s_alloc_mutex);
if (sbi->s_lvid_bh) {
- struct logicalVolIntegrityDesc *lvid =
- (struct logicalVolIntegrityDesc *)
- sbi->s_lvid_bh->b_data;
- struct logicalVolIntegrityDescImpUse *lvidiu =
- udf_sb_lvidiu(sbi);
- struct logicalVolHeaderDesc *lvhd;
- uint64_t uniqueID;
- lvhd = (struct logicalVolHeaderDesc *)
- (lvid->logicalVolContentsUse);
+ struct logicalVolIntegrityDescImpUse *lvidiu;
+
+ iinfo->i_unique = lvid_get_unique_id(sb);
+ mutex_lock(&sbi->s_alloc_mutex);
+ lvidiu = udf_sb_lvidiu(sbi);
if (S_ISDIR(mode))
le32_add_cpu(&lvidiu->numDirs, 1);
else
le32_add_cpu(&lvidiu->numFiles, 1);
- iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
- if (!(++uniqueID & 0x00000000FFFFFFFFUL))
- uniqueID += 16;
- lvhd->uniqueID = cpu_to_le64(uniqueID);
udf_updated_lvid(sb);
+ mutex_unlock(&sbi->s_alloc_mutex);
}
- mutex_unlock(&sbi->s_alloc_mutex);
inode_init_owner(inode, dir, mode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fc48f37aa2d..c6a2e782b97 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -31,7 +31,6 @@
#include "udfdecl.h"
#include <linux/mm.h>
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
@@ -51,6 +50,7 @@ MODULE_LICENSE("GPL");
static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
+static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
sector_t *, int *);
@@ -79,9 +79,7 @@ void udf_evict_inode(struct inode *inode)
want_delete = 1;
inode->i_size = 0;
udf_truncate(inode);
- lock_kernel();
udf_update_inode(inode, IS_SYNC(inode));
- unlock_kernel();
}
invalidate_inode_buffers(inode);
end_writeback(inode);
@@ -97,9 +95,7 @@ void udf_evict_inode(struct inode *inode)
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
if (want_delete) {
- lock_kernel();
udf_free_inode(inode);
- unlock_kernel();
}
}
@@ -302,10 +298,9 @@ static int udf_get_block(struct inode *inode, sector_t block,
err = -EIO;
new = 0;
bh = NULL;
-
- lock_kernel();
-
iinfo = UDF_I(inode);
+
+ down_write(&iinfo->i_data_sem);
if (block == iinfo->i_next_alloc_block + 1) {
iinfo->i_next_alloc_block++;
iinfo->i_next_alloc_goal++;
@@ -324,7 +319,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
map_bh(bh_result, inode->i_sb, phys);
abort:
- unlock_kernel();
+ up_write(&iinfo->i_data_sem);
return err;
}
@@ -1022,16 +1017,16 @@ void udf_truncate(struct inode *inode)
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return;
- lock_kernel();
iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
if (inode->i_sb->s_blocksize <
(udf_file_entry_alloc_offset(inode) +
inode->i_size)) {
udf_expand_file_adinicb(inode, inode->i_size, &err);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
inode->i_size = iinfo->i_lenAlloc;
- unlock_kernel();
+ up_write(&iinfo->i_data_sem);
return;
} else
udf_truncate_extents(inode);
@@ -1042,10 +1037,13 @@ void udf_truncate(struct inode *inode)
offset - udf_file_entry_alloc_offset(inode));
iinfo->i_lenAlloc = inode->i_size;
}
+ up_write(&iinfo->i_data_sem);
} else {
block_truncate_page(inode->i_mapping, inode->i_size,
udf_get_block);
+ down_write(&iinfo->i_data_sem);
udf_truncate_extents(inode);
+ up_write(&iinfo->i_data_sem);
}
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
@@ -1053,7 +1051,6 @@ void udf_truncate(struct inode *inode)
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
- unlock_kernel();
}
static void __udf_read_inode(struct inode *inode)
@@ -1202,6 +1199,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
return;
}
+ read_lock(&sbi->s_cred_lock);
inode->i_uid = le32_to_cpu(fe->uid);
if (inode->i_uid == -1 ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
@@ -1214,13 +1212,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
- inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
- if (!inode->i_nlink)
- inode->i_nlink = 1;
-
- inode->i_size = le64_to_cpu(fe->informationLength);
- iinfo->i_lenExtents = inode->i_size;
-
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
@@ -1230,6 +1221,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
+ read_unlock(&sbi->s_cred_lock);
+
+ inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+ if (!inode->i_nlink)
+ inode->i_nlink = 1;
+
+ inode->i_size = le64_to_cpu(fe->informationLength);
+ iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1373,16 +1372,10 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- int ret;
-
- lock_kernel();
- ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
- unlock_kernel();
-
- return ret;
+ return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
-int udf_sync_inode(struct inode *inode)
+static int udf_sync_inode(struct inode *inode)
{
return udf_update_inode(inode, 1);
}
@@ -2048,7 +2041,7 @@ long udf_block_map(struct inode *inode, sector_t block)
struct extent_position epos = {};
int ret;
- lock_kernel();
+ down_read(&UDF_I(inode)->i_data_sem);
if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30))
@@ -2056,7 +2049,7 @@ long udf_block_map(struct inode *inode, sector_t block)
else
ret = 0;
- unlock_kernel();
+ up_read(&UDF_I(inode)->i_data_sem);
brelse(epos.bh);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 6d8dc02baeb..2be0f9eb86d 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/crc-itu-t.h>
@@ -228,10 +227,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
- isdotdot) {
- brelse(epos.bh);
- return fi;
- }
+ isdotdot)
+ goto out_ok;
if (!lfi)
continue;
@@ -263,7 +260,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
if (dentry->d_name.len > UDF_NAME_LEN - 2)
return ERR_PTR(-ENAMETOOLONG);
- lock_kernel();
#ifdef UDF_RECOVERY
/* temporary shorthand for specifying files by inode number */
if (!strncmp(dentry->d_name.name, ".B=", 3)) {
@@ -275,7 +271,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
};
inode = udf_iget(dir->i_sb, lb);
if (!inode) {
- unlock_kernel();
return ERR_PTR(-EACCES);
}
} else
@@ -291,11 +286,9 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
loc = lelb_to_cpu(cfi.icb.extLocation);
inode = udf_iget(dir->i_sb, &loc);
if (!inode) {
- unlock_kernel();
return ERR_PTR(-EACCES);
}
}
- unlock_kernel();
return d_splice_alias(inode, dentry);
}
@@ -476,15 +469,19 @@ add:
f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
if (!fibh->ebh)
goto out_err;
+ /* Extents could have been merged, invalidate our position */
+ brelse(epos.bh);
+ epos.bh = NULL;
+ epos.block = dinfo->i_location;
+ epos.offset = udf_file_entry_alloc_offset(dir);
if (!fibh->soffset) {
- if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
- (EXT_RECORDED_ALLOCATED >> 30)) {
- block = eloc.logicalBlockNum + ((elen - 1) >>
+ /* Find the freshly allocated block */
+ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+ (EXT_RECORDED_ALLOCATED >> 30))
+ ;
+ block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
- } else
- block++;
-
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
@@ -562,10 +559,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
int err;
struct udf_inode_info *iinfo;
- lock_kernel();
inode = udf_new_inode(dir, mode, &err);
if (!inode) {
- unlock_kernel();
return err;
}
@@ -583,7 +578,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
inode->i_nlink--;
mark_inode_dirty(inode);
iput(inode);
- unlock_kernel();
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -596,7 +590,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
- unlock_kernel();
d_instantiate(dentry, inode);
return 0;
@@ -614,7 +607,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
if (!old_valid_dev(rdev))
return -EINVAL;
- lock_kernel();
err = -EIO;
inode = udf_new_inode(dir, mode, &err);
if (!inode)
@@ -627,7 +619,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
inode->i_nlink--;
mark_inode_dirty(inode);
iput(inode);
- unlock_kernel();
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -646,7 +637,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
err = 0;
out:
- unlock_kernel();
return err;
}
@@ -659,7 +649,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
- lock_kernel();
err = -EMLINK;
if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
goto out;
@@ -712,7 +701,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
err = 0;
out:
- unlock_kernel();
return err;
}
@@ -794,7 +782,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
struct kernel_lb_addr tloc;
retval = -ENOENT;
- lock_kernel();
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
goto out;
@@ -826,7 +813,6 @@ end_rmdir:
brelse(fibh.sbh);
out:
- unlock_kernel();
return retval;
}
@@ -840,7 +826,6 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
struct kernel_lb_addr tloc;
retval = -ENOENT;
- lock_kernel();
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
if (!fi)
goto out;
@@ -870,7 +855,6 @@ end_unlink:
brelse(fibh.sbh);
out:
- unlock_kernel();
return retval;
}
@@ -890,21 +874,21 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
int block;
unsigned char *name = NULL;
int namelen;
- struct buffer_head *bh;
struct udf_inode_info *iinfo;
+ struct super_block *sb = dir->i_sb;
- lock_kernel();
inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
if (!inode)
goto out;
+ iinfo = UDF_I(inode);
+ down_write(&iinfo->i_data_sem);
name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto out_no_entry;
}
- iinfo = UDF_I(inode);
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
@@ -912,7 +896,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
struct kernel_lb_addr eloc;
uint32_t bsize;
- block = udf_new_block(inode->i_sb, inode,
+ block = udf_new_block(sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, &err);
if (!block)
@@ -923,17 +907,17 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
eloc.logicalBlockNum = block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
- bsize = inode->i_sb->s_blocksize;
+ bsize = sb->s_blocksize;
iinfo->i_lenExtents = bsize;
udf_add_aext(inode, &epos, &eloc, bsize, 0);
brelse(epos.bh);
- block = udf_get_pblock(inode->i_sb, block,
+ block = udf_get_pblock(sb, block,
iinfo->i_location.partitionReferenceNum,
0);
- epos.bh = udf_tgetblk(inode->i_sb, block);
+ epos.bh = udf_tgetblk(sb, block);
lock_buffer(epos.bh);
- memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
+ memset(epos.bh->b_data, 0x00, bsize);
set_buffer_uptodate(epos.bh);
unlock_buffer(epos.bh);
mark_buffer_dirty_inode(epos.bh, inode);
@@ -941,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
} else
ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
- eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+ eoffset = sb->s_blocksize - udf_ext0_offset(inode);
pc = (struct pathComponent *)ea;
if (*symname == '/') {
@@ -981,7 +965,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
}
if (pc->componentType == 5) {
- namelen = udf_put_filename(inode->i_sb, compstart, name,
+ namelen = udf_put_filename(sb, compstart, name,
symname - compstart);
if (!namelen)
goto out_no_entry;
@@ -1015,27 +999,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (!fi)
goto out_no_entry;
- cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+ cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
- bh = UDF_SB(inode->i_sb)->s_lvid_bh;
- if (bh) {
- struct logicalVolIntegrityDesc *lvid =
- (struct logicalVolIntegrityDesc *)bh->b_data;
- struct logicalVolHeaderDesc *lvhd;
- uint64_t uniqueID;
- lvhd = (struct logicalVolHeaderDesc *)
- lvid->logicalVolContentsUse;
- uniqueID = le64_to_cpu(lvhd->uniqueID);
+ if (UDF_SB(inode->i_sb)->s_lvid_bh) {
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
- if (!(++uniqueID & 0x00000000FFFFFFFFUL))
- uniqueID += 16;
- lvhd->uniqueID = cpu_to_le64(uniqueID);
- mark_buffer_dirty(bh);
+ cpu_to_le32(lvid_get_unique_id(sb));
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(dir);
+ up_write(&iinfo->i_data_sem);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -1044,10 +1017,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
out:
kfree(name);
- unlock_kernel();
return err;
out_no_entry:
+ up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
iput(inode);
goto out;
@@ -1060,36 +1033,20 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
- struct buffer_head *bh;
- lock_kernel();
if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
- unlock_kernel();
return -EMLINK;
}
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (!fi) {
- unlock_kernel();
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
- bh = UDF_SB(inode->i_sb)->s_lvid_bh;
- if (bh) {
- struct logicalVolIntegrityDesc *lvid =
- (struct logicalVolIntegrityDesc *)bh->b_data;
- struct logicalVolHeaderDesc *lvhd;
- uint64_t uniqueID;
- lvhd = (struct logicalVolHeaderDesc *)
- (lvid->logicalVolContentsUse);
- uniqueID = le64_to_cpu(lvhd->uniqueID);
+ if (UDF_SB(inode->i_sb)->s_lvid_bh) {
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
- cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
- if (!(++uniqueID & 0x00000000FFFFFFFFUL))
- uniqueID += 16;
- lvhd->uniqueID = cpu_to_le64(uniqueID);
- mark_buffer_dirty(bh);
+ cpu_to_le32(lvid_get_unique_id(inode->i_sb));
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
@@ -1103,7 +1060,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
mark_inode_dirty(inode);
ihold(inode);
d_instantiate(dentry, inode);
- unlock_kernel();
return 0;
}
@@ -1124,7 +1080,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
- lock_kernel();
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
if (ofi) {
if (ofibh.sbh != ofibh.ebh)
@@ -1248,7 +1203,6 @@ end_rename:
brelse(nfibh.ebh);
brelse(nfibh.sbh);
}
- unlock_kernel();
return retval;
}
@@ -1261,7 +1215,6 @@ static struct dentry *udf_get_parent(struct dentry *child)
struct fileIdentDesc cfi;
struct udf_fileident_bh fibh;
- lock_kernel();
if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
goto out_unlock;
@@ -1273,11 +1226,9 @@ static struct dentry *udf_get_parent(struct dentry *child)
inode = udf_iget(child->d_inode->i_sb, &tloc);
if (!inode)
goto out_unlock;
- unlock_kernel();
return d_obtain_alias(inode);
out_unlock:
- unlock_kernel();
return ERR_PTR(-EACCES);
}
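The two near-identical blocks deleted from udf_symlink() and udf_link() above both open-coded the LVID uniqueID bump; they now share lvid_get_unique_id(), added in the super.c hunk further down. The quirk both copies carried -- skipping ahead by 16 when the low 32 bits wrap, since UDF reserves the lowest unique-ID values -- now lives in one place. A hedged userspace model of that bump:

#include <stdint.h>

static uint64_t next_unique_id(uint64_t *counter)
{
	uint64_t ret = *counter;

	/* If the low 32 bits wrap to zero, skip the reserved 0..15 range. */
	if (!(++*counter & 0xFFFFFFFFULL))
		*counter += 16;
	return ret;
}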
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 745eb209be0..a71090ea0e0 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
+#include <linux/mutex.h>
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
uint16_t partition, uint32_t offset)
@@ -159,7 +160,9 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
struct udf_sb_info *sbi = UDF_SB(sb);
u16 reallocationTableLen;
struct buffer_head *bh;
+ int ret = 0;
+ mutex_lock(&sbi->s_alloc_mutex);
for (i = 0; i < sbi->s_partitions; i++) {
struct udf_part_map *map = &sbi->s_partmaps[i];
if (old_block > map->s_partition_root &&
@@ -175,8 +178,10 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
break;
}
- if (!st)
- return 1;
+ if (!st) {
+ ret = 1;
+ goto out;
+ }
reallocationTableLen =
le16_to_cpu(st->reallocationTableLen);
@@ -207,14 +212,16 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
((old_block -
map->s_partition_root) &
(sdata->s_packet_len - 1));
- return 0;
+ ret = 0;
+ goto out;
} else if (origLoc == packet) {
*new_block = le32_to_cpu(
entry->mappedLocation) +
((old_block -
map->s_partition_root) &
(sdata->s_packet_len - 1));
- return 0;
+ ret = 0;
+ goto out;
} else if (origLoc > packet)
break;
}
@@ -251,20 +258,24 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
st->mapEntry[k].mappedLocation) +
((old_block - map->s_partition_root) &
(sdata->s_packet_len - 1));
- return 0;
+ ret = 0;
+ goto out;
}
- return 1;
+ ret = 1;
+ goto out;
} /* if old_block */
}
if (i == sbi->s_partitions) {
/* outside of partitions */
/* for now, fail =) */
- return 1;
+ ret = 1;
}
- return 0;
+out:
+ mutex_unlock(&sbi->s_alloc_mutex);
+ return ret;
}
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b539d53320f..7b27b063ff6 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
@@ -135,6 +134,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
ei->i_next_alloc_block = 0;
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
+ init_rwsem(&ei->i_data_sem);
return &ei->vfs_inode;
}
@@ -574,13 +574,14 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
if (!udf_parse_options(options, &uopt, true))
return -EINVAL;
- lock_kernel();
+ write_lock(&sbi->s_cred_lock);
sbi->s_flags = uopt.flags;
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
+ write_unlock(&sbi->s_cred_lock);
if (sbi->s_lvid_bh) {
int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -597,7 +598,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
udf_open_lvid(sb);
out_unlock:
- unlock_kernel();
return error;
}
@@ -966,9 +966,9 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
(sizeof(struct buffer_head *) * nr_groups);
if (size <= PAGE_SIZE)
- bitmap = kmalloc(size, GFP_KERNEL);
+ bitmap = kzalloc(size, GFP_KERNEL);
else
- bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
+ bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
if (bitmap == NULL) {
udf_error(sb, __func__,
@@ -977,7 +977,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
return NULL;
}
- memset(bitmap, 0x00, size);
bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
bitmap->s_nr_groups = nr_groups;
return bitmap;
@@ -1781,6 +1780,8 @@ static void udf_open_lvid(struct super_block *sb)
if (!bh)
return;
+
+ mutex_lock(&sbi->s_alloc_mutex);
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
lvidiu = udf_sb_lvidiu(sbi);
@@ -1797,6 +1798,7 @@ static void udf_open_lvid(struct super_block *sb)
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
+ mutex_unlock(&sbi->s_alloc_mutex);
}
static void udf_close_lvid(struct super_block *sb)
@@ -1809,6 +1811,7 @@ static void udf_close_lvid(struct super_block *sb)
if (!bh)
return;
+ mutex_lock(&sbi->s_alloc_mutex);
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
lvidiu = udf_sb_lvidiu(sbi);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1829,6 +1832,34 @@ static void udf_close_lvid(struct super_block *sb)
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
+ mutex_unlock(&sbi->s_alloc_mutex);
+}
+
+u64 lvid_get_unique_id(struct super_block *sb)
+{
+ struct buffer_head *bh;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct logicalVolIntegrityDesc *lvid;
+ struct logicalVolHeaderDesc *lvhd;
+ u64 uniqueID;
+ u64 ret;
+
+ bh = sbi->s_lvid_bh;
+ if (!bh)
+ return 0;
+
+ lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+ lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
+
+ mutex_lock(&sbi->s_alloc_mutex);
+ ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
+ if (!(++uniqueID & 0x00000000FFFFFFFFUL))
+ uniqueID += 16;
+ lvhd->uniqueID = cpu_to_le64(uniqueID);
+ mutex_unlock(&sbi->s_alloc_mutex);
+ mark_buffer_dirty(bh);
+
+ return ret;
}
static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1886,8 +1917,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
struct kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
- lock_kernel();
-
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
uopt.uid = -1;
uopt.gid = -1;
@@ -1896,10 +1925,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
uopt.dmode = UDF_INVALID_MODE;
sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
- if (!sbi) {
- unlock_kernel();
+ if (!sbi)
return -ENOMEM;
- }
sb->s_fs_info = sbi;
@@ -1936,6 +1963,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
sbi->s_nls_map = uopt.nls_map;
+ rwlock_init(&sbi->s_cred_lock);
if (uopt.session == 0xFFFFFFFF)
sbi->s_session = udf_get_last_session(sb);
@@ -2045,7 +2073,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
- unlock_kernel();
return 0;
error_out:
@@ -2066,7 +2093,6 @@ error_out:
kfree(sbi);
sb->s_fs_info = NULL;
- unlock_kernel();
return -EINVAL;
}
@@ -2105,8 +2131,6 @@ static void udf_put_super(struct super_block *sb)
sbi = UDF_SB(sb);
- lock_kernel();
-
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
if (sbi->s_partitions)
@@ -2122,8 +2146,6 @@ static void udf_put_super(struct super_block *sb)
kfree(sbi->s_partmaps);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
-
- unlock_kernel();
}
static int udf_sync_fs(struct super_block *sb, int wait)
@@ -2186,8 +2208,6 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
uint16_t ident;
struct spaceBitmapDesc *bm;
- lock_kernel();
-
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
bh = udf_read_ptagged(sb, &loc, 0, &ident);
@@ -2224,10 +2244,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
}
}
brelse(bh);
-
out:
- unlock_kernel();
-
return accum;
}
@@ -2240,8 +2257,7 @@ static unsigned int udf_count_free_table(struct super_block *sb,
int8_t etype;
struct extent_position epos;
- lock_kernel();
-
+ mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
epos.block = UDF_I(table)->i_location;
epos.offset = sizeof(struct unallocSpaceEntry);
epos.bh = NULL;
@@ -2250,8 +2266,7 @@ static unsigned int udf_count_free_table(struct super_block *sb,
accum += (elen >> table->i_sb->s_blocksize_bits);
brelse(epos.bh);
-
- unlock_kernel();
+ mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
return accum;
}
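Besides the BKL removal, the super.c changes above make two smaller cleanups: udf_sb_alloc_bitmap() now gets pre-zeroed memory (kzalloc() for page-sized-or-smaller requests, vzalloc() for larger ones) instead of kmalloc/vmalloc plus a manual memset, and the LVID open/close/unique-ID paths all serialize on s_alloc_mutex. A hedged sketch of the size-dependent zeroing allocation; note the free path must mirror the choice (kfree() vs vfree()):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_zeroed(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	/* Large, physically non-contiguous fallback; pair with vfree(). */
	return vzalloc(size);
}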
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 16064787d2b..b1d4488b0f1 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
@@ -78,13 +77,16 @@ static int udf_symlink_filler(struct file *file, struct page *page)
int err = -EIO;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
+ uint32_t pos;
- lock_kernel();
iinfo = UDF_I(inode);
+ pos = udf_block_map(inode, 0);
+
+ down_read(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
- bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+ bh = sb_bread(inode->i_sb, pos);
if (!bh)
goto out;
@@ -95,14 +97,14 @@ static int udf_symlink_filler(struct file *file, struct page *page)
udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
brelse(bh);
- unlock_kernel();
+ up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
out:
- unlock_kernel();
+ up_read(&iinfo->i_data_sem);
SetPageError(page);
kunmap(page);
unlock_page(page);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index e58d1de4107..d1bd31ea724 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,18 @@
#ifndef _UDF_I_H
#define _UDF_I_H
+/*
+ * The i_data_sem and i_mutex protect the allocation information of
+ * regular files and symlinks. This covers all extents belonging to
+ * the file/symlink, whether the data live in-inode or in external
+ * data blocks, preallocation, and goal block information. When
+ * extents are read, i_mutex or i_data_sem must be held (a read
+ * hold of i_data_sem is enough). When extents are changed,
+ * i_data_sem must be held for writing and i_mutex must be held as
+ * well.
+ *
+ * For directories, i_mutex provides all the necessary protection.
+ */
+
struct udf_inode_info {
struct timespec i_crtime;
/* Physical address of inode */
@@ -21,6 +33,7 @@ struct udf_inode_info {
struct long_ad *i_lad;
__u8 *i_data;
} i_ext;
+ struct rw_semaphore i_data_sem;
struct inode vfs_inode;
};
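A hedged sketch of the locking order the new comment prescribes for paths that change a file's allocation; udf_release_file() in the file.c hunk above follows exactly this shape:

#include <linux/fs.h>
#include "udf_i.h"

static void example_change_alloc(struct inode *inode)
{
	mutex_lock(&inode->i_mutex);
	down_write(&UDF_I(inode)->i_data_sem);
	/* ... change extents / in-ICB data here ... */
	up_write(&UDF_I(inode)->i_data_sem);
	mutex_unlock(&inode->i_mutex);
}

Readers of extent information, such as udf_block_map() above, may instead take i_data_sem alone with down_read().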
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index d113b72c276..4858c191242 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -2,6 +2,7 @@
#define __LINUX_UDF_SB_H
#include <linux/mutex.h>
+#include <linux/bitops.h>
/* Since UDF 2.01 is ISO 13346 based... */
#define UDF_SUPER_MAGIC 0x15013346
@@ -128,6 +129,8 @@ struct udf_sb_info {
uid_t s_uid;
mode_t s_fmode;
mode_t s_dmode;
+ /* Lock protecting consistency of above permission settings */
+ rwlock_t s_cred_lock;
/* Root Info */
struct timespec s_record_time;
@@ -139,7 +142,7 @@ struct udf_sb_info {
__u16 s_udfrev;
/* Miscellaneous flags */
- __u32 s_flags;
+ unsigned long s_flags;
/* Encoding info */
struct nls_table *s_nls_map;
@@ -161,8 +164,19 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
int udf_compute_nr_groups(struct super_block *sb, u32 partition);
-#define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
-#define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
-#define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag)
+{
+ return test_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_SET_FLAG(struct super_block *sb, int flag)
+{
+ set_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag)
+{
+ clear_bit(flag, &UDF_SB(sb)->s_flags);
+}
#endif /* __LINUX_UDF_SB_H */
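The conversion from shift-based macros to inline functions is not purely stylistic: test_bit()/set_bit()/clear_bit() are atomic bitops that operate on an unsigned long, which is exactly why s_flags changes type in the same hunk. Call sites keep their shape; a sketch (UDF_FLAG_STRICT is one of the existing flag constants):

	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))	/* test_bit() underneath */
		UDF_SET_FLAG(sb, UDF_FLAG_STRICT);	/* atomic set_bit() */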
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 6995ab1f430..eba48209f9f 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -111,6 +111,8 @@ struct extent_position {
};
/* super.c */
+
+__attribute__((format(printf, 3, 4)))
extern void udf_warning(struct super_block *, const char *, const char *, ...);
static inline void udf_updated_lvid(struct super_block *sb)
{
@@ -123,6 +125,7 @@ static inline void udf_updated_lvid(struct super_block *sb)
sb->s_dirt = 1;
UDF_SB(sb)->s_lvid_dirty = 1;
}
+extern u64 lvid_get_unique_id(struct super_block *sb);
/* namei.c */
extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
@@ -133,7 +136,6 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
/* inode.c */
extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern int udf_sync_inode(struct inode *);
extern void udf_expand_file_adinicb(struct inode *, int, int *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
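The format(printf, 3, 4) attribute added above tells the compiler that udf_warning()'s third argument is a printf-style format string and that argument checking starts at the fourth, so mismatched conversions become compile-time warnings, e.g. (block is a hypothetical variable):

	udf_warning(sb, __func__, "error reading block %u", block);	/* ok */
	udf_warning(sb, __func__, "error reading block %u", "bad");	/* now warns */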
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 4dfc7c37081..00000000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
- wait_queue_head_t waiters;
-} sv_t;
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue_exclusive(&sv->waiters, &wait);
- __set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(lock);
-
- schedule();
-
- remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
- init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
- /*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
- _sv_wait(sv, lock)
-#define sv_signal(sv) \
- wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
- wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 691f61223ed..ec7bbb5645b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
- IO_READ, /* mapping for a read */
- IO_DELAY, /* mapping covers delalloc region */
- IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
- IO_NEW /* just allocated */
-};
/*
* Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@ xfs_setfilesize(
xfs_inode_t *ip = XFS_I(ioend->io_inode);
xfs_fsize_t isize;
- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
- ASSERT(ioend->io_type != IO_READ);
-
if (unlikely(ioend->io_error))
return 0;
@@ -244,10 +232,8 @@ xfs_end_io(
* We might have to update the on-disk file size after extending
* writes.
*/
- if (ioend->io_type != IO_READ) {
- error = xfs_setfilesize(ioend);
- ASSERT(!error || error == EAGAIN);
- }
+ error = xfs_setfilesize(ioend);
+ ASSERT(!error || error == EAGAIN);
/*
* If we didn't complete processing of the ioend, requeue it to the
@@ -318,14 +304,63 @@ STATIC int
xfs_map_blocks(
struct inode *inode,
loff_t offset,
- ssize_t count,
struct xfs_bmbt_irec *imap,
- int flags)
+ int type,
+ int nonblocking)
{
- int nmaps = 1;
- int new = 0;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ ssize_t count = 1 << inode->i_blkbits;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int bmapi_flags = XFS_BMAPI_ENTIRE;
+ int nimaps = 1;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -XFS_ERROR(EIO);
+
+ if (type == IO_UNWRITTEN)
+ bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+ if (nonblocking)
+ return -XFS_ERROR(EAGAIN);
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ }
- return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+ ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+ (ip->i_df.if_flags & XFS_IFEXTENTS));
+ ASSERT(offset <= mp->m_maxioffset);
+
+ if (offset + count > mp->m_maxioffset)
+ count = mp->m_maxioffset - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+ bmapi_flags, NULL, 0, imap, &nimaps, NULL);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+ if (error)
+ return -XFS_ERROR(error);
+
+ if (type == IO_DELALLOC &&
+ (!nimaps || isnullstartblock(imap->br_startblock))) {
+ error = xfs_iomap_write_allocate(ip, offset, count, imap);
+ if (!error)
+ trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+ return -XFS_ERROR(error);
+ }
+
+#ifdef DEBUG
+ if (type == IO_UNWRITTEN) {
+ ASSERT(nimaps);
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+ }
+#endif
+ if (nimaps)
+ trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+ return 0;
}
STATIC int
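The rewritten xfs_map_blocks() above folds the old BMAPI flag plumbing into a direct xfs_bmapi() call and adopts a trylock-first locking rule: non-blocking writeback backs off with EAGAIN instead of stalling on the inode lock. The locking idiom in isolation, as it appears in the hunk:

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);	/* retried by writeback */
		xfs_ilock(ip, XFS_ILOCK_SHARED);	/* sync writeback may sleep */
	}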
@@ -380,26 +415,18 @@ xfs_submit_ioend_bio(
submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC_PLUG : WRITE, bio);
- ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
- bio_put(bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
struct buffer_head *bh)
{
- struct bio *bio;
int nvecs = bio_get_nr_vecs(bh->b_bdev);
-
- do {
- bio = bio_alloc(GFP_NOIO, nvecs);
- nvecs >>= 1;
- } while (!bio);
+ struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
ASSERT(bio->bi_private == NULL);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
- bio_get(bio);
return bio;
}
@@ -470,9 +497,8 @@ xfs_submit_ioend(
/* Pass 1 - start writeback */
do {
next = ioend->io_list;
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
xfs_start_buffer_writeback(bh);
- }
} while ((ioend = next) != NULL);
/* Pass 2 - submit I/O */
@@ -600,117 +626,13 @@ xfs_map_at_offset(
ASSERT(imap->br_startblock != HOLESTARTBLOCK);
ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
- lock_buffer(bh);
xfs_map_buffer(inode, bh, imap, offset);
- bh->b_bdev = xfs_find_bdev_for_inode(inode);
set_buffer_mapped(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
}
/*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
- struct page *page,
- unsigned int pg_offset)
-{
- struct buffer_head *bh, *head;
- int ret = 0;
-
- if (PageWriteback(page))
- return 0;
- if (!PageDirty(page))
- return 0;
- if (!page->mapping)
- return 0;
- if (!page_has_buffers(page))
- return 0;
-
- bh = head = page_buffers(page);
- do {
- if (!buffer_uptodate(bh))
- break;
- if (!buffer_mapped(bh))
- break;
- ret += bh->b_size;
- if (ret >= pg_offset)
- break;
- } while ((bh = bh->b_this_page) != head);
-
- return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
- struct inode *inode,
- struct page *startpage,
- struct buffer_head *bh,
- struct buffer_head *head)
-{
- struct pagevec pvec;
- pgoff_t tindex, tlast, tloff;
- size_t total = 0;
- int done = 0, i;
-
- /* First sum forwards in this page */
- do {
- if (!buffer_uptodate(bh) || !buffer_mapped(bh))
- return total;
- total += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
-
- /* if we reached the end of the page, sum forwards in following pages */
- tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- tindex = startpage->index + 1;
-
- /* Prune this back to avoid pathological behavior */
- tloff = min(tlast, startpage->index + 64);
-
- pagevec_init(&pvec, 0);
- while (!done && tindex <= tloff) {
- unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
- if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
- break;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- size_t pg_offset, pg_len = 0;
-
- if (tindex == tlast) {
- pg_offset =
- i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
- if (!pg_offset) {
- done = 1;
- break;
- }
- } else
- pg_offset = PAGE_CACHE_SIZE;
-
- if (page->index == tindex && trylock_page(page)) {
- pg_len = xfs_probe_page(page, pg_offset);
- unlock_page(page);
- }
-
- if (!pg_len) {
- done = 1;
- break;
- }
-
- total += pg_len;
- tindex++;
- }
-
- pagevec_release(&pvec);
- cond_resched();
- }
-
- return total;
-}
-
-/*
* Test if a given page is suitable for writing as part of an unwritten
* or delayed allocate extent.
*/
@@ -731,9 +653,9 @@ xfs_is_delayed_page(
if (buffer_unwritten(bh))
acceptable = (type == IO_UNWRITTEN);
else if (buffer_delay(bh))
- acceptable = (type == IO_DELAY);
+ acceptable = (type == IO_DELALLOC);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IO_NEW);
+ acceptable = (type == IO_OVERWRITE);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -758,8 +680,7 @@ xfs_convert_page(
loff_t tindex,
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
- struct writeback_control *wbc,
- int all_bh)
+ struct writeback_control *wbc)
{
struct buffer_head *bh, *head;
xfs_off_t end_offset;
@@ -814,37 +735,30 @@ xfs_convert_page(
continue;
}
- if (buffer_unwritten(bh) || buffer_delay(bh)) {
+ if (buffer_unwritten(bh) || buffer_delay(bh) ||
+ buffer_mapped(bh)) {
if (buffer_unwritten(bh))
type = IO_UNWRITTEN;
+ else if (buffer_delay(bh))
+ type = IO_DELALLOC;
else
- type = IO_DELAY;
+ type = IO_OVERWRITE;
if (!xfs_imap_valid(inode, imap, offset)) {
done = 1;
continue;
}
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
- xfs_map_at_offset(inode, bh, imap, offset);
+ lock_buffer(bh);
+ if (type != IO_OVERWRITE)
+ xfs_map_at_offset(inode, bh, imap, offset);
xfs_add_to_ioend(inode, bh, offset, type,
ioendp, done);
page_dirty--;
count++;
} else {
- type = IO_NEW;
- if (buffer_mapped(bh) && all_bh) {
- lock_buffer(bh);
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- count++;
- page_dirty--;
- } else {
- done = 1;
- }
+ done = 1;
}
} while (offset += len, (bh = bh->b_this_page) != head);
@@ -876,7 +790,6 @@ xfs_cluster_write(
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
- int all_bh,
pgoff_t tlast)
{
struct pagevec pvec;
@@ -891,7 +804,7 @@ xfs_cluster_write(
for (i = 0; i < pagevec_count(&pvec); i++) {
done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- imap, ioendp, wbc, all_bh);
+ imap, ioendp, wbc);
if (done)
break;
}
@@ -935,7 +848,7 @@ xfs_aops_discard_page(
struct buffer_head *bh, *head;
loff_t offset = page_offset(page);
- if (!xfs_is_delayed_page(page, IO_DELAY))
+ if (!xfs_is_delayed_page(page, IO_DELALLOC))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1002,10 +915,10 @@ xfs_vm_writepage(
unsigned int type;
__uint64_t end_offset;
pgoff_t end_index, last_index;
- ssize_t size, len;
- int flags, err, imap_valid = 0, uptodate = 1;
+ ssize_t len;
+ int err, imap_valid = 0, uptodate = 1;
int count = 0;
- int all_bh = 0;
+ int nonblocking = 0;
trace_xfs_writepage(inode, page, 0);
@@ -1056,10 +969,14 @@ xfs_vm_writepage(
bh = head = page_buffers(page);
offset = page_offset(page);
- flags = BMAPI_READ;
- type = IO_NEW;
+ type = IO_OVERWRITE;
+
+ if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+ nonblocking = 1;
do {
+ int new_ioend = 0;
+
if (offset >= end_offset)
break;
if (!buffer_uptodate(bh))
@@ -1076,90 +993,54 @@ xfs_vm_writepage(
continue;
}
- if (imap_valid)
- imap_valid = xfs_imap_valid(inode, &imap, offset);
-
- if (buffer_unwritten(bh) || buffer_delay(bh)) {
- int new_ioend = 0;
-
- /*
- * Make sure we don't use a read-only iomap
- */
- if (flags == BMAPI_READ)
- imap_valid = 0;
-
- if (buffer_unwritten(bh)) {
+ if (buffer_unwritten(bh)) {
+ if (type != IO_UNWRITTEN) {
type = IO_UNWRITTEN;
- flags = BMAPI_WRITE | BMAPI_IGNSTATE;
- } else if (buffer_delay(bh)) {
- type = IO_DELAY;
- flags = BMAPI_ALLOCATE;
-
- if (wbc->sync_mode == WB_SYNC_NONE)
- flags |= BMAPI_TRYLOCK;
- }
-
- if (!imap_valid) {
- /*
- * If we didn't have a valid mapping then we
- * need to ensure that we put the new mapping
- * in a new ioend structure. This needs to be
- * done to ensure that the ioends correctly
- * reflect the block mappings at io completion
- * for unwritten extent conversion.
- */
- new_ioend = 1;
- err = xfs_map_blocks(inode, offset, len,
- &imap, flags);
- if (err)
- goto error;
- imap_valid = xfs_imap_valid(inode, &imap,
- offset);
+ imap_valid = 0;
}
- if (imap_valid) {
- xfs_map_at_offset(inode, bh, &imap, offset);
- xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, new_ioend);
- count++;
+ } else if (buffer_delay(bh)) {
+ if (type != IO_DELALLOC) {
+ type = IO_DELALLOC;
+ imap_valid = 0;
}
} else if (buffer_uptodate(bh)) {
- /*
- * we got here because the buffer is already mapped.
- * That means it must already have extents allocated
- * underneath it. Map the extent by reading it.
- */
- if (!imap_valid || flags != BMAPI_READ) {
- flags = BMAPI_READ;
- size = xfs_probe_cluster(inode, page, bh, head);
- err = xfs_map_blocks(inode, offset, size,
- &imap, flags);
- if (err)
- goto error;
- imap_valid = xfs_imap_valid(inode, &imap,
- offset);
+ if (type != IO_OVERWRITE) {
+ type = IO_OVERWRITE;
+ imap_valid = 0;
}
+ } else {
+ if (PageUptodate(page)) {
+ ASSERT(buffer_mapped(bh));
+ imap_valid = 0;
+ }
+ continue;
+ }
+ if (imap_valid)
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
+ if (!imap_valid) {
/*
- * We set the type to IO_NEW in case we are doing a
- * small write at EOF that is extending the file but
- * without needing an allocation. We need to update the
- * file size on I/O completion in this case so it is
- * the same case as having just allocated a new extent
- * that we are writing into for the first time.
+ * If we didn't have a valid mapping then we need to
+ * put the new mapping into a separate ioend structure.
+ * This ensures non-contiguous extents always have
+ * separate ioends, which is particularly important
+ * for unwritten extent conversion at I/O completion
+ * time.
*/
- type = IO_NEW;
- if (trylock_buffer(bh)) {
- if (imap_valid)
- all_bh = 1;
- xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, !imap_valid);
- count++;
- } else {
- imap_valid = 0;
- }
- } else if (PageUptodate(page)) {
- ASSERT(buffer_mapped(bh));
- imap_valid = 0;
+ new_ioend = 1;
+ err = xfs_map_blocks(inode, offset, &imap, type,
+ nonblocking);
+ if (err)
+ goto error;
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
+ }
+ if (imap_valid) {
+ lock_buffer(bh);
+ if (type != IO_OVERWRITE)
+ xfs_map_at_offset(inode, bh, &imap, offset);
+ xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+ new_ioend);
+ count++;
}
if (!iohead)
@@ -1188,7 +1069,7 @@ xfs_vm_writepage(
end_index = last_index;
xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
- wbc, all_bh, end_index);
+ wbc, end_index);
}
if (iohead)
@@ -1257,13 +1138,19 @@ __xfs_get_blocks(
int create,
int direct)
{
- int flags = create ? BMAPI_WRITE : BMAPI_READ;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int lockmode = 0;
struct xfs_bmbt_irec imap;
+ int nimaps = 1;
xfs_off_t offset;
ssize_t size;
- int nimap = 1;
int new = 0;
- int error;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -XFS_ERROR(EIO);
offset = (xfs_off_t)iblock << inode->i_blkbits;
ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1272,15 +1159,45 @@ __xfs_get_blocks(
if (!create && direct && offset >= i_size_read(inode))
return 0;
- if (direct && create)
- flags |= BMAPI_DIRECT;
+ if (create) {
+ lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, lockmode);
+ } else {
+ lockmode = xfs_ilock_map_shared(ip);
+ }
+
+ ASSERT(offset <= mp->m_maxioffset);
+ if (offset + size > mp->m_maxioffset)
+ size = mp->m_maxioffset - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
- error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
- &new);
+ error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+ XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
if (error)
- return -error;
- if (nimap == 0)
- return 0;
+ goto out_unlock;
+
+ if (create &&
+ (!nimaps ||
+ (imap.br_startblock == HOLESTARTBLOCK ||
+ imap.br_startblock == DELAYSTARTBLOCK))) {
+ if (direct) {
+ error = xfs_iomap_write_direct(ip, offset, size,
+ &imap, nimaps);
+ } else {
+ error = xfs_iomap_write_delay(ip, offset, size, &imap);
+ }
+ if (error)
+ goto out_unlock;
+
+ trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+ } else if (nimaps) {
+ trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+ } else {
+ trace_xfs_get_blocks_notfound(ip, offset, size);
+ goto out_unlock;
+ }
+ xfs_iunlock(ip, lockmode);
if (imap.br_startblock != HOLESTARTBLOCK &&
imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1347,6 +1264,10 @@ __xfs_get_blocks(
}
return 0;
+
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return -error;
}
int
@@ -1434,7 +1355,7 @@ xfs_vm_direct_IO(
ssize_t ret;
if (rw & WRITE) {
- iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+ iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb6237..71f721e1a71 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@ extern struct workqueue_struct *xfsconvertd_workqueue;
extern mempool_t *xfs_ioend_pool;
/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+ IO_DIRECT = 0, /* special case for direct I/O ioends */
+ IO_DELALLOC, /* mapping covers delalloc region */
+ IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
+ IO_OVERWRITE, /* mapping covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+ { 0, "" }, \
+ { IO_DELALLOC, "delalloc" }, \
+ { IO_UNWRITTEN, "unwritten" }, \
+ { IO_OVERWRITE, "overwrite" }
+
+/*
* xfs_ioend struct manages large extent writes for XFS.
* It can manage several multi-page bio's at once.
*/
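Moving the enum here also lets the tracing code share it: the XFS_IO_TYPES table maps each I/O type to a label for __print_symbolic(), so trace output shows the type by name. As used in the xfs_trace.h hunk later in this diff:

	__print_symbolic(__entry->type, XFS_IO_TYPES)
	/* prints "delalloc", "unwritten" or "overwrite"; IO_DIRECT maps to "" */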
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6e9e3..92f1f2acc6a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
- .shrink = xfsbufd_wakeup,
- .seeks = DEFAULT_SEEKS,
-};
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
@@ -168,8 +163,79 @@ test_page_region(
}
/*
- * Internal xfs_buf_t object manipulation
+ * xfs_buf_lru_add - add a buffer to the LRU.
+ *
+ * The LRU takes a new reference to the buffer so that it will only be freed
+ * once the shrinker takes the buffer off the LRU.
*/
+STATIC void
+xfs_buf_lru_add(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (list_empty(&bp->b_lru)) {
+ atomic_inc(&bp->b_hold);
+ list_add_tail(&bp->b_lru, &btp->bt_lru);
+ btp->bt_lru_nr++;
+ }
+ spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * xfs_buf_lru_del - remove a buffer from the LRU
+ *
+ * The unlocked check is safe here because it only occurs when there are no
+ * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
+ * there to optimise the shrinker removing the buffer from the LRU and
+ * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
+ * bt_lru_lock.
+ */
+STATIC void
+xfs_buf_lru_del(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+
+ if (list_empty(&bp->b_lru))
+ return;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (!list_empty(&bp->b_lru)) {
+ list_del_init(&bp->b_lru);
+ btp->bt_lru_nr--;
+ }
+ spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that the LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+ struct xfs_buf *bp)
+{
+ bp->b_flags |= XBF_STALE;
+ atomic_set(&(bp)->b_lru_ref, 0);
+ if (!list_empty(&bp->b_lru)) {
+ struct xfs_buftarg *btp = bp->b_target;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (!list_empty(&bp->b_lru)) {
+ list_del_init(&bp->b_lru);
+ btp->bt_lru_nr--;
+ atomic_dec(&bp->b_hold);
+ }
+ spin_unlock(&btp->bt_lru_lock);
+ }
+ ASSERT(atomic_read(&bp->b_hold) >= 1);
+}
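Both xfs_buf_lru_del() and xfs_buf_stale() above rely on a double-checked list_empty(): the unlocked test skips bt_lru_lock in the common case, and the test is repeated under the lock before the list is modified. The idiom on its own (a sketch using the fields from the hunk):

	if (!list_empty(&bp->b_lru)) {			/* cheap, racy hint */
		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {		/* authoritative re-check */
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
		}
		spin_unlock(&btp->bt_lru_lock);
	}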
STATIC void
_xfs_buf_initialize(
@@ -186,7 +252,9 @@ _xfs_buf_initialize(
memset(bp, 0, sizeof(xfs_buf_t));
atomic_set(&bp->b_hold, 1);
+ atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
+ INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
RB_CLEAR_NODE(&bp->b_rbnode);
sema_init(&bp->b_sema, 0); /* held, no waiters */
@@ -262,6 +330,8 @@ xfs_buf_free(
{
trace_xfs_buf_free(bp, _RET_IP_);
+ ASSERT(list_empty(&bp->b_lru));
+
if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
uint i;
@@ -337,7 +407,6 @@ _xfs_buf_lookup_pages(
__func__, gfp_mask);
XFS_STATS_INC(xb_page_retries);
- xfsbufd_wakeup(NULL, 0, gfp_mask);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
@@ -828,6 +897,7 @@ xfs_buf_rele(
if (!pag) {
ASSERT(!bp->b_relse);
+ ASSERT(list_empty(&bp->b_lru));
ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
if (atomic_dec_and_test(&bp->b_hold))
xfs_buf_free(bp);
@@ -835,13 +905,19 @@ xfs_buf_rele(
}
ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
ASSERT(atomic_read(&bp->b_hold) > 0);
if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
if (bp->b_relse) {
atomic_inc(&bp->b_hold);
spin_unlock(&pag->pag_buf_lock);
bp->b_relse(bp);
+ } else if (!(bp->b_flags & XBF_STALE) &&
+ atomic_read(&bp->b_lru_ref)) {
+ xfs_buf_lru_add(bp);
+ spin_unlock(&pag->pag_buf_lock);
} else {
+ xfs_buf_lru_del(bp);
ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
spin_unlock(&pag->pag_buf_lock);
@@ -1438,51 +1514,84 @@ xfs_buf_iomove(
*/
/*
- * Wait for any bufs with callbacks that have been submitted but
- * have not yet returned... walk the hash list for the target.
+ * Wait for any bufs with callbacks that have been submitted but have not yet
+ * returned. These buffers will have an elevated hold count, so wait on those
+ * while freeing all the buffers only held by the LRU.
*/
void
xfs_wait_buftarg(
struct xfs_buftarg *btp)
{
- struct xfs_perag *pag;
- uint i;
+ struct xfs_buf *bp;
- for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
- pag = xfs_perag_get(btp->bt_mount, i);
- spin_lock(&pag->pag_buf_lock);
- while (rb_first(&pag->pag_buf_tree)) {
- spin_unlock(&pag->pag_buf_lock);
+restart:
+ spin_lock(&btp->bt_lru_lock);
+ while (!list_empty(&btp->bt_lru)) {
+ bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+ if (atomic_read(&bp->b_hold) > 1) {
+ spin_unlock(&btp->bt_lru_lock);
delay(100);
- spin_lock(&pag->pag_buf_lock);
+ goto restart;
}
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
+ /*
+ * Clear the LRU reference count so the buffer doesn't get
+ * ignored in xfs_buf_rele().
+ */
+ atomic_set(&bp->b_lru_ref, 0);
+ spin_unlock(&btp->bt_lru_lock);
+ xfs_buf_rele(bp);
+ spin_lock(&btp->bt_lru_lock);
}
+ spin_unlock(&btp->bt_lru_lock);
}
-/*
- * buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
- xfs_buftarg_t *btp)
+int
+xfs_buftarg_shrink(
+ struct shrinker *shrink,
+ int nr_to_scan,
+ gfp_t mask)
{
- spin_lock(&xfs_buftarg_lock);
- list_add(&btp->bt_list, &xfs_buftarg_list);
- spin_unlock(&xfs_buftarg_lock);
-}
+ struct xfs_buftarg *btp = container_of(shrink,
+ struct xfs_buftarg, bt_shrinker);
+ struct xfs_buf *bp;
+ LIST_HEAD(dispose);
-STATIC void
-xfs_unregister_buftarg(
- xfs_buftarg_t *btp)
-{
- spin_lock(&xfs_buftarg_lock);
- list_del(&btp->bt_list);
- spin_unlock(&xfs_buftarg_lock);
+ if (!nr_to_scan)
+ return btp->bt_lru_nr;
+
+ spin_lock(&btp->bt_lru_lock);
+ while (!list_empty(&btp->bt_lru)) {
+ if (nr_to_scan-- <= 0)
+ break;
+
+ bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+
+ /*
+ * Decrement the b_lru_ref count unless the value is already
+ * zero. If the value is already zero, we need to reclaim the
+ * buffer, otherwise it gets another trip through the LRU.
+ */
+ if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+ list_move_tail(&bp->b_lru, &btp->bt_lru);
+ continue;
+ }
+
+ /*
+ * remove the buffer from the LRU now to avoid needing another
+ * lock round trip inside xfs_buf_rele().
+ */
+ list_move(&bp->b_lru, &dispose);
+ btp->bt_lru_nr--;
+ }
+ spin_unlock(&btp->bt_lru_lock);
+
+ while (!list_empty(&dispose)) {
+ bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+ list_del_init(&bp->b_lru);
+ xfs_buf_rele(bp);
+ }
+
+ return btp->bt_lru_nr;
}
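Two details of the shrinker above are worth isolating: b_lru_ref ages buffers lazily (each scan consumes one reference via atomic_add_unless(), and only a buffer already at zero is reclaimed), and reclaim candidates are first moved to a private dispose list so that xfs_buf_rele(), which may free the buffer, never runs under bt_lru_lock. The two-phase shape, sketched:

	LIST_HEAD(dispose);

	spin_lock(&btp->bt_lru_lock);
	/* phase 1: select candidates as above, list_move() them to dispose;
	 * the lock hold time stays short and free() never nests inside it */
	spin_unlock(&btp->bt_lru_lock);

	/* phase 2: drop the LRU references without holding the list lock */
	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}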
void
@@ -1490,17 +1599,14 @@ xfs_free_buftarg(
struct xfs_mount *mp,
struct xfs_buftarg *btp)
{
+ unregister_shrinker(&btp->bt_shrinker);
+
xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
iput(btp->bt_mapping->host);
- /* Unregister the buftarg first so that we don't get a
- * wakeup finding a non-existent task
- */
- xfs_unregister_buftarg(btp);
kthread_stop(btp->bt_task);
-
kmem_free(btp);
}
@@ -1597,20 +1703,13 @@ xfs_alloc_delwrite_queue(
xfs_buftarg_t *btp,
const char *fsname)
{
- int error = 0;
-
- INIT_LIST_HEAD(&btp->bt_list);
INIT_LIST_HEAD(&btp->bt_delwrite_queue);
spin_lock_init(&btp->bt_delwrite_lock);
btp->bt_flags = 0;
btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
- if (IS_ERR(btp->bt_task)) {
- error = PTR_ERR(btp->bt_task);
- goto out_error;
- }
- xfs_register_buftarg(btp);
-out_error:
- return error;
+ if (IS_ERR(btp->bt_task))
+ return PTR_ERR(btp->bt_task);
+ return 0;
}
xfs_buftarg_t *
@@ -1627,12 +1726,17 @@ xfs_alloc_buftarg(
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
+ INIT_LIST_HEAD(&btp->bt_lru);
+ spin_lock_init(&btp->bt_lru_lock);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
if (xfs_mapping_buftarg(btp, bdev))
goto error;
if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
+ btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+ btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&btp->bt_shrinker);
return btp;
error:
@@ -1737,27 +1841,6 @@ xfs_buf_runall_queues(
flush_workqueue(queue);
}
-STATIC int
-xfsbufd_wakeup(
- struct shrinker *shrink,
- int priority,
- gfp_t mask)
-{
- xfs_buftarg_t *btp;
-
- spin_lock(&xfs_buftarg_lock);
- list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
- if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
- continue;
- if (list_empty(&btp->bt_delwrite_queue))
- continue;
- set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
- wake_up_process(btp->bt_task);
- }
- spin_unlock(&xfs_buftarg_lock);
- return 0;
-}
-
/*
* Move as many buffers as specified to the supplied list
* indicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +2035,6 @@ xfs_buf_init(void)
if (!xfsconvertd_workqueue)
goto out_destroy_xfsdatad_workqueue;
- register_shrinker(&xfs_buf_shake);
return 0;
out_destroy_xfsdatad_workqueue:
@@ -1968,7 +2050,6 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- unregister_shrinker(&xfs_buf_shake);
destroy_workqueue(xfsconvertd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 383a3f37cf9..a76c2428faf 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -128,10 +128,15 @@ typedef struct xfs_buftarg {
/* per device delwri queue */
struct task_struct *bt_task;
- struct list_head bt_list;
struct list_head bt_delwrite_queue;
spinlock_t bt_delwrite_lock;
unsigned long bt_flags;
+
+ /* LRU control structures */
+ struct shrinker bt_shrinker;
+ struct list_head bt_lru;
+ spinlock_t bt_lru_lock;
+ unsigned int bt_lru_nr;
} xfs_buftarg_t;
/*
@@ -164,9 +169,11 @@ typedef struct xfs_buf {
xfs_off_t b_file_offset; /* offset in file */
size_t b_buffer_length;/* size of buffer in bytes */
atomic_t b_hold; /* reference count */
+ atomic_t b_lru_ref; /* lru reclaim ref count */
xfs_buf_flags_t b_flags; /* status flags */
struct semaphore b_sema; /* semaphore for lockables */
+ struct list_head b_lru; /* lru list */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
@@ -264,7 +271,8 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
-#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE)
+void xfs_buf_stale(struct xfs_buf *bp);
+#define XFS_BUF_STALE(bp) xfs_buf_stale(bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
#define XFS_BUF_SUPER_STALE(bp) do { \
@@ -328,9 +336,15 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
+static inline void
+xfs_buf_set_ref(
+ struct xfs_buf *bp,
+ int lru_ref)
+{
+ atomic_set(&bp->b_lru_ref, lru_ref);
+}
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref)
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
-#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
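XFS_BUF_SET_VTYPE_REF() was previously a no-op; routing it through xfs_buf_set_ref() means callers now seed b_lru_ref directly, controlling how many shrinker passes a buffer of a given type survives. The btree code later in this diff uses it exactly that way:

	XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
	/* expands to xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF): the buffer
	 * is rotated that many times through the LRU before reclaim */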
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3764d74790e..fc0114da7fd 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -70,8 +70,16 @@ xfs_fs_encode_fh(
else
fileid_type = FILEID_INO32_GEN_PARENT;
- /* filesystem may contain 64bit inode numbers */
- if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS))
+ /*
+ * If the filesystem may contain 64bit inode numbers, we need
+ * to use larger file handles that can represent them.
+ *
+ * While we only allocate inodes that do not fit into 32 bits, any
+ * large enough filesystem may contain them, thus the slightly
+ * confusing-looking conditional below.
+ */
+ if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
+ (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
fileid_type |= XFS_FILEID_TYPE_64FLAG;
/*
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 214ddd71ff7..09649499774 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -37,7 +37,6 @@
#include <kmem.h>
#include <mrlock.h>
-#include <sv.h>
#include <time.h>
#include <support/debug.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 064f964d4f3..c51faaa5e29 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -834,8 +834,11 @@ xfsaild_wakeup(
struct xfs_ail *ailp,
xfs_lsn_t threshold_lsn)
{
- ailp->xa_target = threshold_lsn;
- wake_up_process(ailp->xa_task);
+ /* only ever move the target forwards */
+ if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
+ ailp->xa_target = threshold_lsn;
+ wake_up_process(ailp->xa_task);
+ }
}
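The comparison makes the AIL push target monotonic: concurrent wakers settle on the furthest-ahead LSN, and stale requests degrade to no-ops instead of rewriting the target and waking the thread again. With hypothetical LSNs:

	xfsaild_wakeup(ailp, lsn_a);	/* advances xa_target to lsn_a, wakes */
	xfsaild_wakeup(ailp, lsn_b);	/* no-op whenever lsn_b <= lsn_a */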
STATIC int
@@ -847,8 +850,17 @@ xfsaild(
long tout = 0; /* milliseconds */
while (!kthread_should_stop()) {
- schedule_timeout_interruptible(tout ?
- msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+ /*
+ * for short sleeps indicating congestion, don't allow us to
+ * get woken early. Otherwise all we do is bang on the AIL lock
+ * without making progress.
+ */
+ if (tout && tout <= 20)
+ __set_current_state(TASK_KILLABLE);
+ else
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(tout ?
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
/* swsusp */
try_to_freeze();
@@ -1118,6 +1130,8 @@ xfs_fs_evict_inode(
*/
ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+ lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+ &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
xfs_inactive(ip);
}
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index afb0d7cfad1..a02480de975 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -53,14 +53,30 @@ xfs_inode_ag_walk_grab(
{
struct inode *inode = VFS_I(ip);
+ ASSERT(rcu_read_lock_held());
+
+ /*
+ * check for stale RCU freed inode
+ *
+ * If the inode has been reallocated, it doesn't matter if it's not in
+ * the AG we are walking - we are walking for writeback, so if it
+ * passes all the "valid inode" checks and is dirty, then we'll write
+ * it back anyway. If it has been reallocated and is still being
+ * initialised, the XFS_INEW check below will catch it.
+ */
+ spin_lock(&ip->i_flags_lock);
+ if (!ip->i_ino)
+ goto out_unlock_noent;
+
+ /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+ if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ goto out_unlock_noent;
+ spin_unlock(&ip->i_flags_lock);
+
/* nothing to sync during shutdown */
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return EFSCORRUPTED;
- /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
- return ENOENT;
-
/* If we can't grab the inode, it must be on its way to reclaim. */
if (!igrab(inode))
return ENOENT;
@@ -72,6 +88,10 @@ xfs_inode_ag_walk_grab(
/* inode is valid */
return 0;
+
+out_unlock_noent:
+ spin_unlock(&ip->i_flags_lock);
+ return ENOENT;
}
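Because the walk now runs under rcu_read_lock() (see the xfs_inode_ag_walk() hunk below) rather than pag_ici_lock, any inode coming out of the radix tree may already be freed or recycled; the i_flags_lock section above is that revalidation step. A condensed sketch of the whole lookup-and-grab sequence (agino is a hypothetical index):

	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (ip) {
		spin_lock(&ip->i_flags_lock);
		if (!ip->i_ino ||		/* stale: freed under RCU */
		    __xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE |
					  XFS_IRECLAIM)) {
			spin_unlock(&ip->i_flags_lock);
			ip = NULL;		/* skip, do not touch it */
		} else {
			spin_unlock(&ip->i_flags_lock);
			if (!igrab(VFS_I(ip)))	/* heading into reclaim */
				ip = NULL;
		}
	}
	rcu_read_unlock();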
STATIC int
@@ -98,12 +118,12 @@ restart:
int error = 0;
int i;
- read_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void **)batch, first_index,
XFS_LOOKUP_BATCH);
if (!nr_found) {
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
break;
}
@@ -118,18 +138,26 @@ restart:
batch[i] = NULL;
/*
- * Update the index for the next lookup. Catch overflows
- * into the next AG range which can occur if we have inodes
- * in the last block of the AG and we are currently
- * pointing to the last inode.
+ * Update the index for the next lookup. Catch
+ * overflows into the next AG range which can occur if
+ * we have inodes in the last block of the AG and we
+ * are currently pointing to the last inode.
+ *
+ * Because we may see inodes that are from the wrong AG
+ * due to RCU freeing and reallocation, only update the
+ * index if it lies in this AG. It was a race that led
+ * us to see this inode, so another lookup from the
+ * same index will not find it again.
*/
+ if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+ continue;
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
done = 1;
}
/* unlock now we've grabbed the inodes. */
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
for (i = 0; i < nr_found; i++) {
if (!batch[i])
@@ -592,12 +620,12 @@ xfs_inode_set_reclaim_tag(
struct xfs_perag *pag;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
}
@@ -639,9 +667,14 @@ xfs_reclaim_inode_grab(
struct xfs_inode *ip,
int flags)
{
+ ASSERT(rcu_read_lock_held());
+
+ /* quick check for stale RCU freed inode */
+ if (!ip->i_ino)
+ return 1;
/*
- * do some unlocked checks first to avoid unnecceary lock traffic.
+ * do some unlocked checks first to avoid unnecessary lock traffic.
* The first is a flush lock check, the second is an already-in-reclaim
* check. Only do these checks if we are not going to block on locks.
*/
@@ -654,11 +687,16 @@ xfs_reclaim_inode_grab(
* The radix tree lock here protects a thread in xfs_iget from racing
* with us starting reclaim on the inode. Once we have the
* XFS_IRECLAIM flag set it will not touch us.
+ *
+ * Due to RCU lookup, we may find inodes that have been freed and only
+ * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
+ * aren't candidates for reclaim at all, so we must check that
+ * XFS_IRECLAIMABLE is set before proceeding to reclaim.
*/
spin_lock(&ip->i_flags_lock);
- ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
- if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
- /* ignore as it is already under reclaim */
+ if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
+ __xfs_iflags_test(ip, XFS_IRECLAIM)) {
+ /* not a reclaim candidate. */
spin_unlock(&ip->i_flags_lock);
return 1;
}
@@ -795,12 +833,12 @@ reclaim:
* added to the tree assert that it's been there before to catch
* problems with the inode life time early on.
*/
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
ASSERT(0);
__xfs_inode_clear_reclaim(pag, ip);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
@@ -864,14 +902,14 @@ restart:
struct xfs_inode *batch[XFS_LOOKUP_BATCH];
int i;
- write_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
nr_found = radix_tree_gang_lookup_tag(
&pag->pag_ici_root,
(void **)batch, first_index,
XFS_LOOKUP_BATCH,
XFS_ICI_RECLAIM_TAG);
if (!nr_found) {
- write_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
break;
}
@@ -891,14 +929,24 @@ restart:
* occur if we have inodes in the last block of
* the AG and we are currently pointing to the
* last inode.
+ *
+ * Because we may see inodes that are from the
+ * wrong AG due to RCU freeing and
+ * reallocation, only update the index if it
+ * lies in this AG. It was a race that led us
+ * to see this inode, so another lookup from
+ * the same index will not find it again.
*/
+ if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
+ pag->pag_agno)
+ continue;
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
done = 1;
}
/* unlock now we've grabbed the inodes. */
- write_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
for (i = 0; i < nr_found; i++) {
if (!batch[i])
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e98c59..647af2a2e7a 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -766,8 +766,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__field(int, curr_res)
__field(int, unit_res)
__field(unsigned int, flags)
- __field(void *, reserve_headq)
- __field(void *, write_headq)
+ __field(int, reserveq)
+ __field(int, writeq)
__field(int, grant_reserve_cycle)
__field(int, grant_reserve_bytes)
__field(int, grant_write_cycle)
@@ -784,19 +784,21 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__entry->curr_res = tic->t_curr_res;
__entry->unit_res = tic->t_unit_res;
__entry->flags = tic->t_flags;
- __entry->reserve_headq = log->l_reserve_headq;
- __entry->write_headq = log->l_write_headq;
- __entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
- __entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
- __entry->grant_write_cycle = log->l_grant_write_cycle;
- __entry->grant_write_bytes = log->l_grant_write_bytes;
+ __entry->reserveq = list_empty(&log->l_reserveq);
+ __entry->writeq = list_empty(&log->l_writeq);
+ xlog_crack_grant_head(&log->l_grant_reserve_head,
+ &__entry->grant_reserve_cycle,
+ &__entry->grant_reserve_bytes);
+ xlog_crack_grant_head(&log->l_grant_write_head,
+ &__entry->grant_write_cycle,
+ &__entry->grant_write_bytes);
__entry->curr_cycle = log->l_curr_cycle;
__entry->curr_block = log->l_curr_block;
- __entry->tail_lsn = log->l_tail_lsn;
+ __entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
),
TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
- "t_unit_res %u t_flags %s reserve_headq 0x%p "
- "write_headq 0x%p grant_reserve_cycle %d "
+ "t_unit_res %u t_flags %s reserveq %s "
+ "writeq %s grant_reserve_cycle %d "
"grant_reserve_bytes %d grant_write_cycle %d "
"grant_write_bytes %d curr_cycle %d curr_block %d "
"tail_cycle %d tail_block %d",
@@ -807,8 +809,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__entry->curr_res,
__entry->unit_res,
__print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
- __entry->reserve_headq,
- __entry->write_headq,
+ __entry->reserveq ? "empty" : "active",
+ __entry->writeq ? "empty" : "active",
__entry->grant_reserve_cycle,
__entry->grant_reserve_bytes,
__entry->grant_write_cycle,
@@ -835,6 +837,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
@@ -842,6 +845,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
@@ -935,10 +939,10 @@ DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
-DECLARE_EVENT_CLASS(xfs_iomap_class,
+DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
- int flags, struct xfs_bmbt_irec *irec),
- TP_ARGS(ip, offset, count, flags, irec),
+ int type, struct xfs_bmbt_irec *irec),
+ TP_ARGS(ip, offset, count, type, irec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
@@ -946,7 +950,7 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
__field(loff_t, new_size)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, flags)
+ __field(int, type)
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
@@ -958,13 +962,13 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
__entry->new_size = ip->i_new_size;
__entry->offset = offset;
__entry->count = count;
- __entry->flags = flags;
+ __entry->type = type;
__entry->startoff = irec ? irec->br_startoff : 0;
__entry->startblock = irec ? irec->br_startblock : 0;
__entry->blockcount = irec ? irec->br_blockcount : 0;
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
- "offset 0x%llx count %zd flags %s "
+ "offset 0x%llx count %zd type %s "
"startoff 0x%llx startblock %lld blockcount 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
@@ -972,20 +976,21 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
__entry->new_size,
__entry->offset,
__entry->count,
- __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+ __print_symbolic(__entry->type, XFS_IO_TYPES),
__entry->startoff,
(__int64_t)__entry->startblock,
__entry->blockcount)
)
#define DEFINE_IOMAP_EVENT(name) \
-DEFINE_EVENT(xfs_iomap_class, name, \
+DEFINE_EVENT(xfs_imap_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
- int flags, struct xfs_bmbt_irec *irec), \
- TP_ARGS(ip, offset, count, flags, irec))
-DEFINE_IOMAP_EVENT(xfs_iomap_enter);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+ int type, struct xfs_bmbt_irec *irec), \
+ TP_ARGS(ip, offset, count, type, irec))
+DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1027,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
TRACE_EVENT(xfs_itruncate_start,
@@ -1420,6 +1426,7 @@ DEFINE_EVENT(xfs_alloc_class, name, \
TP_PROTO(struct xfs_alloc_arg *args), \
TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index faf8e1a83a1..d22aa310310 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -149,7 +149,6 @@ xfs_qm_dqdestroy(
ASSERT(list_empty(&dqp->q_freelist));
mutex_destroy(&dqp->q_qlock);
- sv_destroy(&dqp->q_pinwait);
kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
atomic_dec(&xfs_Gqm->qm_totaldquots);
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 63c7a1a6c02..58632cc17f2 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,7 @@ typedef struct xfs_perag {
atomic_t pagf_fstrms; /* # of filestreams active in this AG */
- rwlock_t pag_ici_lock; /* incore inode lock */
+ spinlock_t pag_ici_lock; /* incore inode cache lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
int pag_ici_reclaimable; /* reclaimable inodes */
struct mutex pag_ici_reclaim_lock; /* serialisation point */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 112abc439ca..fa8723f5870 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -577,61 +577,58 @@ xfs_alloc_ag_vextent_exact(
xfs_extlen_t rlen; /* length of returned extent */
ASSERT(args->alignment == 1);
+
/*
* Allocate/initialize a cursor for the by-number freespace btree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
+ args->agno, XFS_BTNUM_BNO);
+
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
* Look for the closest free block <= bno, it must contain bno
* if any free block does.
*/
- if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+ error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
+ if (error)
goto error0;
- if (!i) {
- /*
- * Didn't find it, return null.
- */
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- args->agbno = NULLAGBLOCK;
- return 0;
- }
+ if (!i)
+ goto not_found;
+
/*
* Grab the freespace record.
*/
- if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+ error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
+ if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
ASSERT(fbno <= args->agbno);
minend = args->agbno + args->minlen;
maxend = args->agbno + args->maxlen;
fend = fbno + flen;
+
/*
* Give up if the freespace isn't long enough for the minimum request.
*/
- if (fend < minend) {
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- args->agbno = NULLAGBLOCK;
- return 0;
- }
+ if (fend < minend)
+ goto not_found;
+
/*
* End of extent will be smaller of the freespace end and the
* maximal requested end.
- */
- end = XFS_AGBLOCK_MIN(fend, maxend);
- /*
+ *
* Fix the length according to mod and prod if given.
*/
+ end = XFS_AGBLOCK_MIN(fend, maxend);
args->len = end - args->agbno;
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args)) {
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- return 0;
- }
+ if (!xfs_alloc_fix_minleft(args))
+ goto not_found;
+
rlen = args->len;
ASSERT(args->agbno + rlen <= fend);
end = args->agbno + rlen;
+
/*
* We are allocating agbno for rlen [agbno .. end]
* Allocate/initialize a cursor for the by-size btree.
@@ -640,16 +637,25 @@ xfs_alloc_ag_vextent_exact(
args->agno, XFS_BTNUM_CNT);
ASSERT(args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
- args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+ error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
+ args->len, XFSA_FIXUP_BNO_OK);
+ if (error) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
goto error0;
}
+
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- trace_xfs_alloc_exact_done(args);
args->wasfromfl = 0;
+ trace_xfs_alloc_exact_done(args);
+ return 0;
+
+not_found:
+ /* Didn't find it, return null. */
+ xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+ args->agbno = NULLAGBLOCK;
+ trace_xfs_alloc_exact_notfound(args);
return 0;
error0:
@@ -659,6 +665,95 @@ error0:
}
/*
+ * Search the btree in a given direction via the search cursor and compare
+ * the records found against the good extent we've already found.
+ */
+STATIC int
+xfs_alloc_find_best_extent(
+ struct xfs_alloc_arg *args, /* allocation argument structure */
+ struct xfs_btree_cur **gcur, /* good cursor */
+ struct xfs_btree_cur **scur, /* searching cursor */
+ xfs_agblock_t gdiff, /* difference for search comparison */
+ xfs_agblock_t *sbno, /* extent found by search */
+ xfs_extlen_t *slen,
+ xfs_extlen_t *slena, /* aligned length */
+ int dir) /* 0 = search right, 1 = search left */
+{
+ xfs_agblock_t bno;
+ xfs_agblock_t new;
+ xfs_agblock_t sdiff;
+ int error;
+ int i;
+
+ /* The good extent is perfect, no need to search. */
+ if (!gdiff)
+ goto out_use_good;
+
+ /*
+ * Look until we find a better one, run out of space or run off the end.
+ */
+ do {
+ error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
+ args->minlen, &bno, slena);
+
+ /*
+ * The good extent is closer than this one.
+ */
+ if (!dir) {
+ if (bno >= args->agbno + gdiff)
+ goto out_use_good;
+ } else {
+ if (bno <= args->agbno - gdiff)
+ goto out_use_good;
+ }
+
+ /*
+ * Same distance, compare length and pick the best.
+ */
+ if (*slena >= args->minlen) {
+ args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
+ xfs_alloc_fix_len(args);
+
+ sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment, *sbno,
+ *slen, &new);
+
+ /*
+ * Choose closer size and invalidate other cursor.
+ */
+ if (sdiff < gdiff)
+ goto out_use_search;
+ goto out_use_good;
+ }
+
+ if (!dir)
+ error = xfs_btree_increment(*scur, 0, &i);
+ else
+ error = xfs_btree_decrement(*scur, 0, &i);
+ if (error)
+ goto error0;
+ } while (i);
+
+out_use_good:
+ xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
+ *scur = NULL;
+ return 0;
+
+out_use_search:
+ xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
+ *gcur = NULL;
+ return 0;
+
+error0:
+ /* caller invalidates cursors */
+ return error;
+}
+
+/*
* Allocate a variable extent near bno in the allocation group agno.
* Extent's length (returned in len) will be between minlen and maxlen,
* and of the form k * prod + mod unless there's nothing that large.
@@ -925,203 +1020,45 @@ xfs_alloc_ag_vextent_near(
}
}
} while (bno_cur_lt || bno_cur_gt);
+
/*
* Got both cursors still active, need to find better entry.
*/
if (bno_cur_lt && bno_cur_gt) {
- /*
- * Left side is long enough, look for a right side entry.
- */
if (ltlena >= args->minlen) {
/*
- * Fix up the length.
+ * Left side is good, look for a right side entry.
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
- rlen = args->len;
- ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+ ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, ltbno, ltlen, &ltnew);
+
+ error = xfs_alloc_find_best_extent(args,
+ &bno_cur_lt, &bno_cur_gt,
+ ltdiff, &gtbno, &gtlen, &gtlena,
+ 0 /* search right */);
+ } else {
+ ASSERT(gtlena >= args->minlen);
+
/*
- * Not perfect.
- */
- if (ltdiff) {
- /*
- * Look until we find a better one, run out of
- * space, or run off the end.
- */
- while (bno_cur_lt && bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(
- bno_cur_gt, &gtbno,
- &gtlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(gtbno, gtlen,
- args->alignment, args->minlen,
- &gtbnoa, &gtlena);
- /*
- * The left one is clearly better.
- */
- if (gtbnoa >= args->agbno + ltdiff) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- break;
- }
- /*
- * If we reach a big enough entry,
- * compare the two and pick the best.
- */
- if (gtlena >= args->minlen) {
- args->len =
- XFS_EXTLEN_MIN(gtlena,
- args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- gtdiff = xfs_alloc_compute_diff(
- args->agbno, rlen,
- args->alignment,
- gtbno, gtlen, &gtnew);
- /*
- * Right side is better.
- */
- if (gtdiff < ltdiff) {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- /*
- * Left side is better.
- */
- else {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- break;
- }
- /*
- * Fell off the right end.
- */
- if ((error = xfs_btree_increment(
- bno_cur_gt, 0, &i)))
- goto error0;
- if (!i) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- break;
- }
- }
- }
- /*
- * The left side is perfect, trash the right side.
- */
- else {
- xfs_btree_del_cursor(bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- }
- /*
- * It's the right side that was found first, look left.
- */
- else {
- /*
- * Fix up the length.
+ * Right side is good, look for a left side entry.
*/
args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
xfs_alloc_fix_len(args);
- rlen = args->len;
- gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+ gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, gtbno, gtlen, &gtnew);
- /*
- * Right side entry isn't perfect.
- */
- if (gtdiff) {
- /*
- * Look until we find a better one, run out of
- * space, or run off the end.
- */
- while (bno_cur_lt && bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(
- bno_cur_lt, &ltbno,
- &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen,
- args->alignment, args->minlen,
- &ltbnoa, &ltlena);
- /*
- * The right one is clearly better.
- */
- if (ltbnoa <= args->agbno - gtdiff) {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- break;
- }
- /*
- * If we reach a big enough entry,
- * compare the two and pick the best.
- */
- if (ltlena >= args->minlen) {
- args->len = XFS_EXTLEN_MIN(
- ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- ltdiff = xfs_alloc_compute_diff(
- args->agbno, rlen,
- args->alignment,
- ltbno, ltlen, &ltnew);
- /*
- * Left side is better.
- */
- if (ltdiff < gtdiff) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- /*
- * Right side is better.
- */
- else {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- break;
- }
- /*
- * Fell off the left end.
- */
- if ((error = xfs_btree_decrement(
- bno_cur_lt, 0, &i)))
- goto error0;
- if (!i) {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- break;
- }
- }
- }
- /*
- * The right side is perfect, trash the left side.
- */
- else {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
+
+ error = xfs_alloc_find_best_extent(args,
+ &bno_cur_gt, &bno_cur_lt,
+ gtdiff, &ltbno, &ltlen, &ltlena,
+ 1 /* search left */);
}
+
+ if (error)
+ goto error0;
}
+
/*
* If we couldn't get anything, give up.
*/
@@ -1130,6 +1067,7 @@ xfs_alloc_ag_vextent_near(
args->agbno = NULLAGBLOCK;
return 0;
}
+
/*
* At this point we have selected a freespace entry, either to the
* left or to the right. If it's on the right, copy all the
@@ -1146,6 +1084,7 @@ xfs_alloc_ag_vextent_near(
j = 1;
} else
j = 0;
+
/*
* Fix up the length and compute the useful address.
*/
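
A minimal userspace sketch of the left/right choice the hunk above folds into xfs_alloc_find_best_extent(): keep whichever candidate extent lands closer to the wanted block. The names and plain-long types are invented for the example; the kernel version also honours alignment and length constraints.

#include <stdio.h>
#include <stdlib.h>

/* Distance of a candidate block from the block the caller asked for. */
static long diff_from_wanted(long wanted, long candidate)
{
	return labs(candidate - wanted);
}

/* Return 0 to keep the left candidate, 1 to prefer the right one. */
static int pick_best(long wanted, long left_bno, long right_bno)
{
	return diff_from_wanted(wanted, right_bno) <
	       diff_from_wanted(wanted, left_bno);
}

int main(void)
{
	printf("%s\n", pick_best(100, 80, 110) ? "right" : "left");
	return 0;
}
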
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a6cff8edcdb..71e90dc2aeb 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -637,7 +637,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
* It didn't all fit, so we have to sort everything on hashval.
*/
sbsize = sf->hdr.count * sizeof(*sbuf);
- sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+ sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
/*
* Scan the attribute list for the rest of the entries, storing
@@ -2386,7 +2386,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
args.dp = context->dp;
args.whichfork = XFS_ATTR_FORK;
args.valuelen = valuelen;
- args.value = kmem_alloc(valuelen, KM_SLEEP);
+ args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
retval = xfs_attr_rmtval_get(&args);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 04f9cca8da7..2f9e97c128a 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -634,9 +634,8 @@ xfs_btree_read_bufl(
return error;
}
ASSERT(!bp || !XFS_BUF_GETERROR(bp));
- if (bp != NULL) {
+ if (bp)
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
- }
*bpp = bp;
return 0;
}
@@ -944,13 +943,13 @@ xfs_btree_set_refs(
switch (cur->bc_btnum) {
case XFS_BTNUM_BNO:
case XFS_BTNUM_CNT:
- XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
break;
case XFS_BTNUM_INO:
- XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF);
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
break;
case XFS_BTNUM_BMAP:
- XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF);
+ XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
break;
default:
ASSERT(0);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2686d0d54c5..ed2b65f3f8b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -142,7 +142,7 @@ xfs_buf_item_log_check(
#endif
STATIC void xfs_buf_error_relse(xfs_buf_t *bp);
-STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
+STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
/*
* This returns the number of log iovecs needed to log the
@@ -450,7 +450,7 @@ xfs_buf_item_unpin(
* xfs_trans_ail_delete() drops the AIL lock.
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
- xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
+ xfs_buf_do_callbacks(bp);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
} else {
@@ -918,15 +918,26 @@ xfs_buf_attach_iodone(
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
}
+/*
+ * We can have many callbacks on a buffer. Running the callbacks individually
+ * can cause a lot of contention on the AIL lock, so we allow a single
+ * callback to scan the remaining lip->li_bio_list for other items of the
+ * same type and callback, and process them all in that first call.
+ *
+ * As a result, the loop walking the callback list below will also modify the
+ * list. It removes the first item from the list and then runs the callback.
+ * The loop then restarts from the new head of the list. This allows the
+ * callback to scan and modify the list attached to the buffer and we don't
+ * have to care about maintaining a next item pointer.
+ */
STATIC void
xfs_buf_do_callbacks(
- xfs_buf_t *bp,
- xfs_log_item_t *lip)
+ struct xfs_buf *bp)
{
- xfs_log_item_t *nlip;
+ struct xfs_log_item *lip;
- while (lip != NULL) {
- nlip = lip->li_bio_list;
+ while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
+ XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
ASSERT(lip->li_cb != NULL);
/*
* Clear the next pointer so we don't have any
@@ -936,7 +947,6 @@ xfs_buf_do_callbacks(
*/
lip->li_bio_list = NULL;
lip->li_cb(bp, lip);
- lip = nlip;
}
}
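
A minimal userspace sketch of the pop-head loop in xfs_buf_do_callbacks() above (the struct and names are invented for the example): because each iteration re-reads the list head, a callback may splice items out of the remaining list without the walker keeping a next pointer.

#include <stddef.h>
#include <stdio.h>

struct item {
	struct item *next;
	void (*cb)(struct item *);
};

static void print_cb(struct item *it)
{
	printf("callback for %p\n", (void *)it);
}

static void do_callbacks(struct item **head)
{
	struct item *it;

	while ((it = *head) != NULL) {
		*head = it->next;	/* unlink before running the callback */
		it->next = NULL;	/* callback must not see stale links */
		it->cb(it);
	}
}

int main(void)
{
	struct item b = { NULL, print_cb };
	struct item a = { &b, print_cb };
	struct item *head = &a;

	do_callbacks(&head);
	return 0;
}
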
@@ -970,7 +980,7 @@ xfs_buf_iodone_callbacks(
ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
XFS_BUF_SUPER_STALE(bp);
trace_xfs_buf_item_iodone(bp, _RET_IP_);
- xfs_buf_do_callbacks(bp, lip);
+ xfs_buf_do_callbacks(bp);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
xfs_buf_ioend(bp, 0);
@@ -1029,7 +1039,7 @@ xfs_buf_iodone_callbacks(
return;
}
- xfs_buf_do_callbacks(bp, lip);
+ xfs_buf_do_callbacks(bp);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
xfs_buf_ioend(bp, 0);
@@ -1063,7 +1073,7 @@ xfs_buf_error_relse(
* We have to unpin the pinned buffers so do the
* callbacks.
*/
- xfs_buf_do_callbacks(bp, lip);
+ xfs_buf_do_callbacks(bp);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 0e2ed43f16c..b6ecd2061e7 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -105,17 +105,6 @@ typedef struct xfs_buf_log_item {
xfs_buf_log_format_t bli_format; /* in-log header */
} xfs_buf_log_item_t;
-/*
- * This structure is used during recovery to record the buf log
- * items which have been canceled and should not be replayed.
- */
-typedef struct xfs_buf_cancel {
- xfs_daddr_t bc_blkno;
- uint bc_len;
- int bc_refcount;
- struct xfs_buf_cancel *bc_next;
-} xfs_buf_cancel_t;
-
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index a55e687bf56..75f2ef60e57 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -48,6 +48,28 @@ xfs_efi_item_free(
}
/*
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the
+ * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees
+ * the EFI.
+ */
+STATIC void
+__xfs_efi_release(
+ struct xfs_efi_log_item *efip)
+{
+ struct xfs_ail *ailp = efip->efi_item.li_ailp;
+
+ if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
+ spin_lock(&ailp->xa_lock);
+ /* xfs_trans_ail_delete() drops the AIL lock. */
+ xfs_trans_ail_delete(ailp, &efip->efi_item);
+ xfs_efi_item_free(efip);
+ }
+}
+
+/*
* This returns the number of iovecs needed to log the given efi item.
* We only need 1 iovec for an efi item. It just logs the efi_log_format
* structure.
@@ -74,7 +96,8 @@ xfs_efi_item_format(
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
uint size;
- ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
+ ASSERT(atomic_read(&efip->efi_next_extent) ==
+ efip->efi_format.efi_nextents);
efip->efi_format.efi_type = XFS_LI_EFI;
@@ -99,10 +122,12 @@ xfs_efi_item_pin(
}
/*
- * While EFIs cannot really be pinned, the unpin operation is the
- * last place at which the EFI is manipulated during a transaction.
- * Here we coordinate with xfs_efi_cancel() to determine who gets to
- * free the EFI.
+ * While EFIs cannot really be pinned, the unpin operation is the last place at
+ * which the EFI is manipulated during a transaction. If we are being asked to
+ * remove the EFI it's because the transaction has been cancelled and by
+ * definition that means the EFI cannot be in the AIL so remove it from the
+ * transaction and free it. Otherwise coordinate with xfs_efi_release() (via
+ * XFS_EFI_COMMITTED) to determine who gets to free the EFI.
*/
STATIC void
xfs_efi_item_unpin(
@@ -110,20 +135,14 @@ xfs_efi_item_unpin(
int remove)
{
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
- struct xfs_ail *ailp = lip->li_ailp;
-
- spin_lock(&ailp->xa_lock);
- if (efip->efi_flags & XFS_EFI_CANCELED) {
- if (remove)
- xfs_trans_del_item(lip);
- /* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, lip);
+ if (remove) {
+ ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
+ xfs_trans_del_item(lip);
xfs_efi_item_free(efip);
- } else {
- efip->efi_flags |= XFS_EFI_COMMITTED;
- spin_unlock(&ailp->xa_lock);
+ return;
}
+ __xfs_efi_release(efip);
}
/*
@@ -152,16 +171,20 @@ xfs_efi_item_unlock(
}
/*
- * The EFI is logged only once and cannot be moved in the log, so
- * simply return the lsn at which it's been logged. The canceled
- * flag is not paid any attention here. Checking for that is delayed
- * until the EFI is unpinned.
+ * The EFI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged. For bulk transaction committed
+ * processing, the EFI may be processed but not yet unpinned prior to the EFD
+ * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected
+ * when processing the EFD.
*/
STATIC xfs_lsn_t
xfs_efi_item_committed(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
+ struct xfs_efi_log_item *efip = EFI_ITEM(lip);
+
+ set_bit(XFS_EFI_COMMITTED, &efip->efi_flags);
return lsn;
}
@@ -230,6 +253,7 @@ xfs_efi_init(
xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (__psint_t)(void*)efip;
+ atomic_set(&efip->efi_next_extent, 0);
return efip;
}
@@ -289,37 +313,18 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
/*
- * This is called by the efd item code below to release references to
- * the given efi item. Each efd calls this with the number of
- * extents that it has logged, and when the sum of these reaches
- * the total number of extents logged by this efi item we can free
- * the efi item.
- *
- * Freeing the efi item requires that we remove it from the AIL.
- * We'll use the AIL lock to protect our counters as well as
- * the removal from the AIL.
+ * This is called by the efd item code below to release references to the given
+ * efi item. Each efd calls this with the number of extents that it has
+ * logged, and when the sum of these reaches the total number of extents logged
+ * by this efi item we can free the efi item.
*/
void
xfs_efi_release(xfs_efi_log_item_t *efip,
uint nextents)
{
- struct xfs_ail *ailp = efip->efi_item.li_ailp;
- int extents_left;
-
- ASSERT(efip->efi_next_extent > 0);
- ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
-
- spin_lock(&ailp->xa_lock);
- ASSERT(efip->efi_next_extent >= nextents);
- efip->efi_next_extent -= nextents;
- extents_left = efip->efi_next_extent;
- if (extents_left == 0) {
- /* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
- xfs_efi_item_free(efip);
- } else {
- spin_unlock(&ailp->xa_lock);
- }
+ ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
+ if (atomic_sub_and_test(nextents, &efip->efi_next_extent))
+ __xfs_efi_release(efip);
}
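
A small userspace analogue of the lockless release scheme above, assuming C11 atomics stand in for the kernel's atomic_t (the struct and names are invented): atomic_fetch_sub() reporting that it took the count to zero plays the role of atomic_sub_and_test(), so only one caller ever owns the free.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct efi {
	atomic_int next_extent;	/* outstanding extents, like efi_next_extent */
};

/* Subtract and report whether this caller took the count to zero. */
static bool efi_release(struct efi *efip, int nextents)
{
	return atomic_fetch_sub(&efip->next_extent, nextents) == nextents;
}

int main(void)
{
	struct efi e = { 2 };

	printf("%d\n", efi_release(&e, 1));	/* 0: one extent still out */
	printf("%d\n", efi_release(&e, 1));	/* 1: last reference dropped */
	return 0;
}
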
static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0d22c56fdf6..375f68e4253 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -111,11 +111,10 @@ typedef struct xfs_efd_log_format_64 {
#define XFS_EFI_MAX_FAST_EXTENTS 16
/*
- * Define EFI flags.
+ * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
*/
-#define XFS_EFI_RECOVERED 0x1
-#define XFS_EFI_COMMITTED 0x2
-#define XFS_EFI_CANCELED 0x4
+#define XFS_EFI_RECOVERED 1
+#define XFS_EFI_COMMITTED 2
/*
* This is the "extent free intention" log item. It is used
@@ -125,8 +124,8 @@ typedef struct xfs_efd_log_format_64 {
*/
typedef struct xfs_efi_log_item {
xfs_log_item_t efi_item;
- uint efi_flags; /* misc flags */
- uint efi_next_extent;
+ atomic_t efi_next_extent;
+ unsigned long efi_flags; /* misc flags */
xfs_efi_log_format_t efi_format;
} xfs_efi_log_item_t;
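
The switch from mask values (0x1, 0x2) to bit numbers (1, 2) above follows from the set_bit()/test_and_clear_bit() API, which takes a bit index into an unsigned long rather than a mask. A non-atomic userspace sketch of that calling convention (the kernel versions are atomic):

#include <stdio.h>

#define EFI_RECOVERED	1	/* bit numbers, not masks */
#define EFI_COMMITTED	2

static void set_bit_ul(int nr, unsigned long *flags)
{
	*flags |= 1UL << nr;
}

static int test_and_clear_bit_ul(int nr, unsigned long *flags)
{
	int was_set = (*flags >> nr) & 1;

	*flags &= ~(1UL << nr);
	return was_set;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit_ul(EFI_COMMITTED, &flags);
	/* first caller wins the handshake, the second sees the bit clear */
	printf("first: %d\n", test_and_clear_bit_ul(EFI_COMMITTED, &flags));
	printf("second: %d\n", test_and_clear_bit_ul(EFI_COMMITTED, &flags));
	return 0;
}
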
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7c116e814a..f56d30e8040 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -374,6 +374,7 @@ xfs_growfs_data_private(
mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
} else
mp->m_maxicount = 0;
+ xfs_set_low_space_thresholds(mp);
/* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d7de5a3f786..cb9b6d1469f 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,6 +43,17 @@
/*
+ * Define xfs inode iolock lockdep classes. We need to ensure that all active
+ * inodes are considered the same for lockdep purposes, including inodes that
+ * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
+ * guarantee the locks are considered the same when there are multiple lock
+ * initialisation sites. Also, define a reclaimable inode class so it is
+ * obvious in lockdep reports which class the report is against.
+ */
+static struct lock_class_key xfs_iolock_active;
+struct lock_class_key xfs_iolock_reclaimable;
+
+/*
* Allocate and initialise an xfs_inode.
*/
STATIC struct xfs_inode *
@@ -69,8 +80,11 @@ xfs_inode_alloc(
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
+ ASSERT(ip->i_ino == 0);
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+ lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+ &xfs_iolock_active, "xfs_iolock_active");
/* initialise the xfs inode */
ip->i_ino = ino;
@@ -85,9 +99,6 @@ xfs_inode_alloc(
ip->i_size = 0;
ip->i_new_size = 0;
- /* prevent anyone from using this yet */
- VFS_I(ip)->i_state = I_NEW;
-
return ip;
}
@@ -145,7 +156,18 @@ xfs_inode_free(
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
- call_rcu(&ip->i_vnode.i_rcu, xfs_inode_free_callback);
+ /*
+ * Because we use RCU freeing we need to ensure the inode always
+ * appears to be reclaimed with an invalid inode number when in the
+ * free state. The ip->i_flags_lock provides the barrier against lookup
+ * races.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags = XFS_IRECLAIM;
+ ip->i_ino = 0;
+ spin_unlock(&ip->i_flags_lock);
+
+ call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
/*
@@ -155,14 +177,29 @@ static int
xfs_iget_cache_hit(
struct xfs_perag *pag,
struct xfs_inode *ip,
+ xfs_ino_t ino,
int flags,
- int lock_flags) __releases(pag->pag_ici_lock)
+ int lock_flags) __releases(RCU)
{
struct inode *inode = VFS_I(ip);
struct xfs_mount *mp = ip->i_mount;
int error;
+ /*
+ * check for re-use of an inode within an RCU grace period due to the
+ * radix tree nodes not being updated yet. We monitor for this by
+ * setting the inode number to zero before freeing the inode structure.
+ * If the inode has been reallocated and set up, then the inode number
+ * will not match, so check for that, too.
+ */
spin_lock(&ip->i_flags_lock);
+ if (ip->i_ino != ino) {
+ trace_xfs_iget_skip(ip);
+ XFS_STATS_INC(xs_ig_frecycle);
+ error = EAGAIN;
+ goto out_error;
+ }
+
/*
* If we are racing with another cache hit that is currently
@@ -205,7 +242,7 @@ xfs_iget_cache_hit(
ip->i_flags |= XFS_IRECLAIM;
spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
error = -inode_init_always(mp->m_super, inode);
if (error) {
@@ -213,7 +250,7 @@ xfs_iget_cache_hit(
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
- read_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
spin_lock(&ip->i_flags_lock);
ip->i_flags &= ~XFS_INEW;
@@ -223,14 +260,20 @@ xfs_iget_cache_hit(
goto out_error;
}
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
ip->i_flags |= XFS_INEW;
__xfs_inode_clear_reclaim_tag(mp, pag, ip);
inode->i_state = I_NEW;
+
+ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+ mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+ lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+ &xfs_iolock_active, "xfs_iolock_active");
+
spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
@@ -241,7 +284,7 @@ xfs_iget_cache_hit(
/* We've got a live one. */
spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
trace_xfs_iget_hit(ip);
}
@@ -255,7 +298,7 @@ xfs_iget_cache_hit(
out_error:
spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
return error;
}
@@ -308,7 +351,7 @@ xfs_iget_cache_miss(
BUG();
}
- write_lock(&pag->pag_ici_lock);
+ spin_lock(&pag->pag_ici_lock);
/* insert the new inode */
error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -323,14 +366,14 @@ xfs_iget_cache_miss(
ip->i_udquot = ip->i_gdquot = NULL;
xfs_iflags_set(ip, XFS_INEW);
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
*ipp = ip;
return 0;
out_preload_end:
- write_unlock(&pag->pag_ici_lock);
+ spin_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
if (lock_flags)
xfs_iunlock(ip, lock_flags);
@@ -377,7 +420,7 @@ xfs_iget(
xfs_agino_t agino;
/* reject inode numbers outside existing AGs */
- if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+ if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
@@ -386,15 +429,15 @@ xfs_iget(
again:
error = 0;
- read_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
if (ip) {
- error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
+ error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
if (error)
goto out_error_or_again;
} else {
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
XFS_STATS_INC(xs_ig_missed);
error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
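
A minimal userspace stand-in for the revalidation pattern this file now relies on: under RCU, a radix-tree slot can briefly point at a freed or recycled inode, so xfs_iget_cache_hit() re-checks ip->i_ino under ip->i_flags_lock. Here a plain mutex stands in for i_flags_lock and the RCU read lock itself is elided; types and names are invented for the example.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct inode {
	pthread_mutex_t	flags_lock;
	uint64_t	ino;		/* zeroed when the inode is freed */
};

static int lookup_validate(struct inode *ip, uint64_t wanted)
{
	int ok;

	pthread_mutex_lock(&ip->flags_lock);
	ok = (ip->ino == wanted);	/* zero or a recycled number fails */
	pthread_mutex_unlock(&ip->flags_lock);
	return ok;
}

int main(void)
{
	struct inode ip = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("stale slot: %d\n", lookup_validate(&ip, 42)); /* 0: retry */
	ip.ino = 42;
	printf("valid slot: %d\n", lookup_validate(&ip, 42)); /* 1: hit */
	return 0;
}
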
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 108c7a085f9..be7cf625421 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -887,7 +887,7 @@ xfs_iread(
* around for a while. This helps to keep recently accessed
* meta-data in-core longer.
*/
- XFS_BUF_SET_REF(bp, XFS_INO_REF);
+ xfs_buf_set_ref(bp, XFS_INO_REF);
/*
* Use xfs_trans_brelse() to release the buffer containing the
@@ -2000,17 +2000,33 @@ xfs_ifree_cluster(
*/
for (i = 0; i < ninodes; i++) {
retry:
- read_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, (inum + i)));
- /* Inode not in memory or stale, nothing to do */
- if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
- read_unlock(&pag->pag_ici_lock);
+ /* Inode not in memory, nothing to do */
+ if (!ip) {
+ rcu_read_unlock();
continue;
}
/*
+ * because this is an RCU protected lookup, we could
+ * find a recently freed or even reallocated inode
+ * during the lookup. We need to check under the
+ * i_flags_lock for a valid inode here. Skip it if it
+ * is not valid, the wrong inode or stale.
+ */
+ spin_lock(&ip->i_flags_lock);
+ if (ip->i_ino != inum + i ||
+ __xfs_iflags_test(ip, XFS_ISTALE)) {
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+ continue;
+ }
+ spin_unlock(&ip->i_flags_lock);
+
+ /*
* Don't try to lock/unlock the current inode, but we
* _cannot_ skip the other inodes that we did not find
* in the list attached to the buffer and are not
@@ -2019,11 +2035,11 @@ retry:
*/
if (ip != free_ip &&
!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
delay(1);
goto retry;
}
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
xfs_iflock(ip);
xfs_iflags_set(ip, XFS_ISTALE);
@@ -2629,7 +2645,7 @@ xfs_iflush_cluster(
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
- read_lock(&pag->pag_ici_lock);
+ rcu_read_lock();
/* really need a gang lookup range call here */
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
first_index, inodes_per_cluster);
@@ -2640,9 +2656,21 @@ xfs_iflush_cluster(
iq = ilist[i];
if (iq == ip)
continue;
- /* if the inode lies outside this cluster, we're done. */
- if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
- break;
+
+ /*
+ * because this is an RCU protected lookup, we could find a
+ * recently freed or even reallocated inode during the lookup.
+ * We need to check under the i_flags_lock for a valid inode
+ * here. Skip it if it is not valid or the wrong inode.
+ */
+ spin_lock(&iq->i_flags_lock);
+ if (!iq->i_ino ||
+ (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+ spin_unlock(&iq->i_flags_lock);
+ continue;
+ }
+ spin_unlock(&iq->i_flags_lock);
+
/*
* Do an un-protected check to see if the inode is dirty and
* is a candidate for flushing. These checks will be repeated
@@ -2692,7 +2720,7 @@ xfs_iflush_cluster(
}
out_free:
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
kmem_free(ilist);
out_put:
xfs_perag_put(pag);
@@ -2704,7 +2732,7 @@ cluster_corrupt_out:
* Corruption detected in the clustering loop. Invalidate the
* inode buffer and shut down the filesystem.
*/
- read_unlock(&pag->pag_ici_lock);
+ rcu_read_unlock();
/*
* Clean up the buffer. If it was B_DELWRI, just release it --
* brelse can handle it with no problems. If not, shut down the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fb2ca2e4cdc..5c95fa8ec11 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -376,12 +376,13 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
/*
* In-core inode flags.
*/
-#define XFS_IRECLAIM 0x0001 /* we have started reclaiming this inode */
-#define XFS_ISTALE 0x0002 /* inode has been staled */
-#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
-#define XFS_INEW 0x0008 /* inode has just been allocated */
-#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */
-#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */
+#define XFS_IRECLAIM 0x0001 /* started reclaiming this inode */
+#define XFS_ISTALE 0x0002 /* inode has been staled */
+#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
+#define XFS_INEW 0x0008 /* inode has just been allocated */
+#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */
+#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */
/*
* Flags for inode locking.
@@ -438,6 +439,8 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
+extern struct lock_class_key xfs_iolock_reclaimable;
+
/*
* Flags for xfs_itruncate_start().
*/
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7c8d30c453c..fd4f398bd6f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -842,15 +842,64 @@ xfs_inode_item_destroy(
* flushed to disk. It is responsible for removing the inode item
* from the AIL if it has not been re-logged, and unlocking the inode's
* flush lock.
+ *
+ * To reduce AIL lock traffic as much as possible, we scan the buffer log item
+ * list for other inodes that will run this function. We remove them from the
+ * buffer list so we can process all the inode IO completions in one AIL lock
+ * traversal.
*/
void
xfs_iflush_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- struct xfs_inode_log_item *iip = INODE_ITEM(lip);
- xfs_inode_t *ip = iip->ili_inode;
+ struct xfs_inode_log_item *iip;
+ struct xfs_log_item *blip;
+ struct xfs_log_item *next;
+ struct xfs_log_item *prev;
struct xfs_ail *ailp = lip->li_ailp;
+ int need_ail = 0;
+
+ /*
+ * Scan the buffer IO completions for other inodes being completed and
+ * attach them to the current inode log item.
+ */
+ blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ prev = NULL;
+ while (blip != NULL) {
+ if (blip->li_cb != xfs_iflush_done) {
+ prev = blip;
+ blip = blip->li_bio_list;
+ continue;
+ }
+
+ /* remove from list */
+ next = blip->li_bio_list;
+ if (!prev) {
+ XFS_BUF_SET_FSPRIVATE(bp, next);
+ } else {
+ prev->li_bio_list = next;
+ }
+
+ /* add to current list */
+ blip->li_bio_list = lip->li_bio_list;
+ lip->li_bio_list = blip;
+
+ /*
+ * while we have the item, do the unlocked check for needing
+ * the AIL lock.
+ */
+ iip = INODE_ITEM(blip);
+ if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
+ need_ail++;
+
+ blip = next;
+ }
+
+ /* make sure we capture the state of the initial inode. */
+ iip = INODE_ITEM(lip);
+ if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
+ need_ail++;
/*
* We only want to pull the item from the AIL if it is
@@ -861,28 +910,37 @@ xfs_iflush_done(
* the lock since it's cheaper, and then we recheck while
* holding the lock before removing the inode from the AIL.
*/
- if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
+ if (need_ail) {
+ struct xfs_log_item *log_items[need_ail];
+ int i = 0;
spin_lock(&ailp->xa_lock);
- if (lip->li_lsn == iip->ili_flush_lsn) {
- /* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, lip);
- } else {
- spin_unlock(&ailp->xa_lock);
+ for (blip = lip; blip; blip = blip->li_bio_list) {
+ iip = INODE_ITEM(blip);
+ if (iip->ili_logged &&
+ blip->li_lsn == iip->ili_flush_lsn) {
+ log_items[i++] = blip;
+ }
+ ASSERT(i <= need_ail);
}
+ /* xfs_trans_ail_delete_bulk() drops the AIL lock. */
+ xfs_trans_ail_delete_bulk(ailp, log_items, i);
}
- iip->ili_logged = 0;
/*
- * Clear the ili_last_fields bits now that we know that the
- * data corresponding to them is safely on disk.
+ * Clean up and unlock the flush lock now that we are done. We can clear the
+ * ili_last_fields bits now that we know that the data corresponding to
+ * them is safely on disk.
*/
- iip->ili_last_fields = 0;
+ for (blip = lip; blip; blip = next) {
+ next = blip->li_bio_list;
+ blip->li_bio_list = NULL;
- /*
- * Release the inode's flush lock since we're done with it.
- */
- xfs_ifunlock(ip);
+ iip = INODE_ITEM(blip);
+ iip->ili_logged = 0;
+ iip->ili_last_fields = 0;
+ xfs_ifunlock(iip->ili_inode);
+ }
}
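
A userspace sketch of the batching idea in xfs_iflush_done() above (types and names invented): walk the buffer's item list once, unlink every item with the matching completion callback onto a private batch, then pay for the expensive lock (the AIL lock in the kernel) a single time for the whole batch.

#include <stddef.h>
#include <stdio.h>

struct item {
	struct item *next;
	int kind;		/* stands in for the li_cb comparison */
};

/* Move every item of 'kind' from *head onto a batch list and return it. */
static struct item *steal_matching(struct item **head, int kind)
{
	struct item *batch = NULL;
	struct item **pp = head;

	while (*pp) {
		struct item *it = *pp;

		if (it->kind != kind) {
			pp = &it->next;
			continue;
		}
		*pp = it->next;		/* unlink from the buffer list */
		it->next = batch;	/* push onto the private batch */
		batch = it;
	}
	return batch;
}

int main(void)
{
	struct item c = { NULL, 2 }, b = { &c, 1 }, a = { &b, 1 };
	struct item *head = &a;
	struct item *batch = steal_matching(&head, 1);
	int n = 0;

	for (; batch; batch = batch->next)
		n++;			/* one lock round trip covers n items */
	printf("batched %d, left behind %d\n", n, head != NULL);
	return 0;
}
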
/*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 20576146369..55582bd6665 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,127 +47,8 @@
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS 2
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
-STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
- int, struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
- struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *, int *);
-
-int
-xfs_iomap(
- struct xfs_inode *ip,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- struct xfs_bmbt_irec *imap,
- int *nimaps,
- int *new)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- int error = 0;
- int lockmode = 0;
- int bmapi_flags = 0;
-
- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-
- *new = 0;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
-
- trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
-
- switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
- case BMAPI_READ:
- lockmode = xfs_ilock_map_shared(ip);
- bmapi_flags = XFS_BMAPI_ENTIRE;
- break;
- case BMAPI_WRITE:
- lockmode = XFS_ILOCK_EXCL;
- if (flags & BMAPI_IGNSTATE)
- bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
- xfs_ilock(ip, lockmode);
- break;
- case BMAPI_ALLOCATE:
- lockmode = XFS_ILOCK_SHARED;
- bmapi_flags = XFS_BMAPI_ENTIRE;
-
- /* Attempt non-blocking lock */
- if (flags & BMAPI_TRYLOCK) {
- if (!xfs_ilock_nowait(ip, lockmode))
- return XFS_ERROR(EAGAIN);
- } else {
- xfs_ilock(ip, lockmode);
- }
- break;
- default:
- BUG();
- }
-
- ASSERT(offset <= mp->m_maxioffset);
- if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
- count = mp->m_maxioffset - offset;
- end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
- error = xfs_bmapi(NULL, ip, offset_fsb,
- (xfs_filblks_t)(end_fsb - offset_fsb),
- bmapi_flags, NULL, 0, imap,
- nimaps, NULL);
-
- if (error)
- goto out;
-
- switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
- case BMAPI_WRITE:
- /* If we found an extent, return it */
- if (*nimaps &&
- (imap->br_startblock != HOLESTARTBLOCK) &&
- (imap->br_startblock != DELAYSTARTBLOCK)) {
- trace_xfs_iomap_found(ip, offset, count, flags, imap);
- break;
- }
-
- if (flags & BMAPI_DIRECT) {
- error = xfs_iomap_write_direct(ip, offset, count, flags,
- imap, nimaps);
- } else {
- error = xfs_iomap_write_delay(ip, offset, count, flags,
- imap, nimaps);
- }
- if (!error) {
- trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
- }
- *new = 1;
- break;
- case BMAPI_ALLOCATE:
- /* If we found an extent, return it */
- xfs_iunlock(ip, lockmode);
- lockmode = 0;
-
- if (*nimaps && !isnullstartblock(imap->br_startblock)) {
- trace_xfs_iomap_found(ip, offset, count, flags, imap);
- break;
- }
-
- error = xfs_iomap_write_allocate(ip, offset, count,
- imap, nimaps);
- break;
- }
-
- ASSERT(*nimaps <= 1);
-
-out:
- if (lockmode)
- xfs_iunlock(ip, lockmode);
- return XFS_ERROR(error);
-}
-
STATIC int
xfs_iomap_eof_align_last_fsb(
xfs_mount_t *mp,
@@ -236,14 +117,13 @@ xfs_cmn_err_fsblock_zero(
return EFSCORRUPTED;
}
-STATIC int
+int
xfs_iomap_write_direct(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
- int flags,
xfs_bmbt_irec_t *imap,
- int *nmaps)
+ int nmaps)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -279,7 +159,7 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
} else {
- if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
+ if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
imap->br_blockcount +
imap->br_startoff);
@@ -331,7 +211,7 @@ xfs_iomap_write_direct(
xfs_trans_ijoin(tp, ip);
bmapi_flag = XFS_BMAPI_WRITE;
- if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
+ if (offset < ip->i_size || extsz)
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
@@ -370,7 +250,6 @@ xfs_iomap_write_direct(
goto error_out;
}
- *nmaps = 1;
return 0;
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -379,7 +258,6 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- *nmaps = 0; /* nothing set-up here */
error_out:
return XFS_ERROR(error);
@@ -389,6 +267,9 @@ error_out:
* If the caller is doing a write at the end of the file, then extend the
* allocation out to the file system's write iosize. We clean up any extra
* space left over when the file is closed in xfs_inactive().
+ *
+ * If we find we already have delalloc preallocation beyond EOF, don't do more
+ * preallocation as it is not needed.
*/
STATIC int
xfs_iomap_eof_want_preallocate(
@@ -396,7 +277,6 @@ xfs_iomap_eof_want_preallocate(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
- int ioflag,
xfs_bmbt_irec_t *imap,
int nimaps,
int *prealloc)
@@ -405,6 +285,7 @@ xfs_iomap_eof_want_preallocate(
xfs_filblks_t count_fsb;
xfs_fsblock_t firstblock;
int n, error, imaps;
+ int found_delalloc = 0;
*prealloc = 0;
if ((offset + count) <= ip->i_size)
@@ -429,20 +310,66 @@ xfs_iomap_eof_want_preallocate(
return 0;
start_fsb += imap[n].br_blockcount;
count_fsb -= imap[n].br_blockcount;
+
+ if (imap[n].br_startblock == DELAYSTARTBLOCK)
+ found_delalloc = 1;
}
}
- *prealloc = 1;
+ if (!found_delalloc)
+ *prealloc = 1;
return 0;
}
-STATIC int
+/*
+ * If we don't have a user specified preallocation size, dynamically increase
+ * the preallocation size as the size of the file grows. Cap the maximum size
+ * at a single extent or less if the filesystem is near full. The closer the
+ * filesystem is to full, the smaller the maximum preallocation.
+ */
+STATIC xfs_fsblock_t
+xfs_iomap_prealloc_size(
+ struct xfs_mount *mp,
+ struct xfs_inode *ip)
+{
+ xfs_fsblock_t alloc_blocks = 0;
+
+ if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+ int shift = 0;
+ int64_t freesp;
+
+ alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
+ alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
+ rounddown_pow_of_two(alloc_blocks));
+
+ xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+ freesp = mp->m_sb.sb_fdblocks;
+ if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
+ shift = 2;
+ if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
+ shift++;
+ if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
+ shift++;
+ if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
+ shift++;
+ if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
+ shift++;
+ }
+ if (shift)
+ alloc_blocks >>= shift;
+ }
+
+ if (alloc_blocks < mp->m_writeio_blocks)
+ alloc_blocks = mp->m_writeio_blocks;
+
+ return alloc_blocks;
+}
+
+int
xfs_iomap_write_delay(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
- int ioflag,
- xfs_bmbt_irec_t *ret_imap,
- int *nmaps)
+ xfs_bmbt_irec_t *ret_imap)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -469,16 +396,19 @@ xfs_iomap_write_delay(
extsz = xfs_get_extsz_hint(ip);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
- ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+ imap, XFS_WRITE_IMAPS, &prealloc);
if (error)
return error;
retry:
if (prealloc) {
+ xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
+
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
- last_fsb = ioalign + mp->m_writeio_blocks;
+ last_fsb = ioalign + alloc_blocks;
} else {
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
}
@@ -496,22 +426,31 @@ retry:
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
&nimaps, NULL);
- if (error && (error != ENOSPC))
+ switch (error) {
+ case 0:
+ case ENOSPC:
+ case EDQUOT:
+ break;
+ default:
return XFS_ERROR(error);
+ }
/*
- * If bmapi returned us nothing, and if we didn't get back EDQUOT,
- * then we must have run out of space - flush all other inodes with
- * delalloc blocks and retry without EOF preallocation.
+ * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For
+ * ENOSPC, flush all other inodes with delalloc blocks to free up
+ * some of the excess reserved metadata space. For both cases, retry
+ * without EOF preallocation.
*/
if (nimaps == 0) {
trace_xfs_delalloc_enospc(ip, offset, count);
if (flushed)
- return XFS_ERROR(ENOSPC);
+ return XFS_ERROR(error ? error : ENOSPC);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_flush_inodes(ip);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (error == ENOSPC) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_flush_inodes(ip);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ }
flushed = 1;
error = 0;
@@ -523,8 +462,6 @@ retry:
return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
*ret_imap = imap[0];
- *nmaps = 1;
-
return 0;
}
@@ -538,13 +475,12 @@ retry:
* We no longer bother to look at the incoming map - all we have to
* guarantee is that whatever we allocate fills the required range.
*/
-STATIC int
+int
xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
- xfs_bmbt_irec_t *imap,
- int *retmap)
+ xfs_bmbt_irec_t *imap)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb, last_block;
@@ -557,8 +493,6 @@ xfs_iomap_write_allocate(
int error = 0;
int nres;
- *retmap = 0;
-
/*
* Make sure that the dquots are there.
*/
@@ -680,7 +614,6 @@ xfs_iomap_write_allocate(
if ((offset_fsb >= imap->br_startoff) &&
(offset_fsb < (imap->br_startoff +
imap->br_blockcount))) {
- *retmap = 1;
XFS_STATS_INC(xs_xstrat_quick);
return 0;
}
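
A worked example of the throttle in xfs_iomap_prealloc_size() above, with invented threshold percentages standing in for mp->m_low_space[]: start from the file size rounded down to a power of two, then shrink as free space falls through each threshold (the first crossing counts double, as in the kernel's shift = 2 starting value).

#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_pow2(uint64_t x)
{
	uint64_t r = 1;

	while (r * 2 <= x)
		r *= 2;
	return r;
}

static uint64_t prealloc_blocks(uint64_t isize_blocks, uint64_t free_blocks,
				uint64_t fs_blocks, uint64_t min_blocks)
{
	uint64_t alloc = rounddown_pow2(isize_blocks);
	int shift = 0;
	int pct;

	for (pct = 5; pct >= 1; pct--)
		if (free_blocks * 100 < fs_blocks * pct)
			shift += shift ? 1 : 2;
	alloc >>= shift;
	return alloc < min_blocks ? min_blocks : alloc;
}

int main(void)
{
	/* 1000-block file, 2% of a 1M-block fs free: shift 4, 512 >> 4 = 32 */
	printf("%llu\n", (unsigned long long)
	       prealloc_blocks(1000, 20000, 1000000, 16));
	return 0;
}
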
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a430f50..80615760959 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
-/* base extent manipulation calls */
-#define BMAPI_READ (1 << 0) /* read extents */
-#define BMAPI_WRITE (1 << 1) /* create extents */
-#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */
-#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */
-#define BMAPI_MMA (1 << 6) /* allocate for mmap write */
-#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */
-
-#define BMAPI_FLAGS \
- { BMAPI_READ, "READ" }, \
- { BMAPI_WRITE, "WRITE" }, \
- { BMAPI_ALLOCATE, "ALLOCATE" }, \
- { BMAPI_IGNSTATE, "IGNSTATE" }, \
- { BMAPI_DIRECT, "DIRECT" }, \
- { BMAPI_TRYLOCK, "TRYLOCK" }
-
struct xfs_inode;
struct xfs_bmbt_irec;
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
- struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+ struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+ struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+ struct xfs_bmbt_irec *);
extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cee4ab9f8a9..0bf24b11d0c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,7 +47,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
xfs_buftarg_t *log_target,
xfs_daddr_t blk_offset,
int num_bblks);
-STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int xlog_space_left(struct log *log, atomic64_t *head);
STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_dealloc_log(xlog_t *log);
@@ -70,7 +70,7 @@ STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
/* local functions to manipulate grant head */
STATIC int xlog_grant_log_space(xlog_t *log,
xlog_ticket_t *xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t *mp,
+STATIC void xlog_grant_push_ail(struct log *log,
int need_bytes);
STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
xlog_ticket_t *ticket);
@@ -81,98 +81,73 @@ STATIC void xlog_ungrant_log_space(xlog_t *log,
#if defined(DEBUG)
STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void xlog_verify_grant_tail(struct log *log);
STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
int count, boolean_t syncing);
STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
xfs_lsn_t tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_grant_head(a,b)
+#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif
STATIC int xlog_iclogs_empty(xlog_t *log);
-
static void
-xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_sub_space(
+ struct log *log,
+ atomic64_t *head,
+ int bytes)
{
- if (*qp) {
- tic->t_next = (*qp);
- tic->t_prev = (*qp)->t_prev;
- (*qp)->t_prev->t_next = tic;
- (*qp)->t_prev = tic;
- } else {
- tic->t_prev = tic->t_next = tic;
- *qp = tic;
- }
+ int64_t head_val = atomic64_read(head);
+ int64_t new, old;
- tic->t_flags |= XLOG_TIC_IN_Q;
-}
+ do {
+ int cycle, space;
-static void
-xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
-{
- if (tic == tic->t_next) {
- *qp = NULL;
- } else {
- *qp = tic->t_next;
- tic->t_next->t_prev = tic->t_prev;
- tic->t_prev->t_next = tic->t_next;
- }
+ xlog_crack_grant_head_val(head_val, &cycle, &space);
- tic->t_next = tic->t_prev = NULL;
- tic->t_flags &= ~XLOG_TIC_IN_Q;
+ space -= bytes;
+ if (space < 0) {
+ space += log->l_logsize;
+ cycle--;
+ }
+
+ old = head_val;
+ new = xlog_assign_grant_head_val(cycle, space);
+ head_val = atomic64_cmpxchg(head, old, new);
+ } while (head_val != old);
}
static void
-xlog_grant_sub_space(struct log *log, int bytes)
+xlog_grant_add_space(
+ struct log *log,
+ atomic64_t *head,
+ int bytes)
{
- log->l_grant_write_bytes -= bytes;
- if (log->l_grant_write_bytes < 0) {
- log->l_grant_write_bytes += log->l_logsize;
- log->l_grant_write_cycle--;
- }
-
- log->l_grant_reserve_bytes -= bytes;
- if ((log)->l_grant_reserve_bytes < 0) {
- log->l_grant_reserve_bytes += log->l_logsize;
- log->l_grant_reserve_cycle--;
- }
+ int64_t head_val = atomic64_read(head);
+ int64_t new, old;
-}
+ do {
+ int tmp;
+ int cycle, space;
-static void
-xlog_grant_add_space_write(struct log *log, int bytes)
-{
- int tmp = log->l_logsize - log->l_grant_write_bytes;
- if (tmp > bytes)
- log->l_grant_write_bytes += bytes;
- else {
- log->l_grant_write_cycle++;
- log->l_grant_write_bytes = bytes - tmp;
- }
-}
+ xlog_crack_grant_head_val(head_val, &cycle, &space);
-static void
-xlog_grant_add_space_reserve(struct log *log, int bytes)
-{
- int tmp = log->l_logsize - log->l_grant_reserve_bytes;
- if (tmp > bytes)
- log->l_grant_reserve_bytes += bytes;
- else {
- log->l_grant_reserve_cycle++;
- log->l_grant_reserve_bytes = bytes - tmp;
- }
-}
+ tmp = log->l_logsize - space;
+ if (tmp > bytes)
+ space += bytes;
+ else {
+ space = bytes - tmp;
+ cycle++;
+ }
-static inline void
-xlog_grant_add_space(struct log *log, int bytes)
-{
- xlog_grant_add_space_write(log, bytes);
- xlog_grant_add_space_reserve(log, bytes);
+ old = head_val;
+ new = xlog_assign_grant_head_val(cycle, space);
+ head_val = atomic64_cmpxchg(head, old, new);
+ } while (head_val != old);
}
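
A userspace sketch of the compare-and-swap loop above, assuming C11 atomics and a simplified packing with the cycle in the high 32 bits and the byte count in the low 32 (in the spirit of xlog_crack_grant_head_val(), not its exact layout): both fields move together in one atomic 64-bit update, which is what lets the grant heads drop l_grant_lock.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOGSIZE 4096

static void grant_sub_space(_Atomic uint64_t *head, int bytes)
{
	uint64_t old = atomic_load(head);
	uint64_t new;

	do {
		int32_t cycle = (int32_t)(old >> 32);
		int32_t space = (int32_t)(old & 0xffffffffu);

		space -= bytes;
		if (space < 0) {	/* wrapped past the start of the log */
			space += LOGSIZE;
			cycle--;
		}
		new = ((uint64_t)(uint32_t)cycle << 32) | (uint32_t)space;
	} while (!atomic_compare_exchange_weak(head, &old, new));
}

int main(void)
{
	_Atomic uint64_t head = ((uint64_t)2 << 32) | 100; /* cycle 2, 100B */

	grant_sub_space(&head, 300);
	printf("cycle=%u space=%u\n",
	       (unsigned)(head >> 32), (unsigned)(head & 0xffffffffu));
	return 0;
}
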
static void
@@ -355,7 +330,7 @@ xfs_log_reserve(
trace_xfs_log_reserve(log, internal_ticket);
- xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+ xlog_grant_push_ail(log, internal_ticket->t_unit_res);
retval = xlog_regrant_write_log_space(log, internal_ticket);
} else {
/* may sleep if need to allocate more tickets */
@@ -369,7 +344,7 @@ xfs_log_reserve(
trace_xfs_log_reserve(log, internal_ticket);
- xlog_grant_push_ail(mp,
+ xlog_grant_push_ail(log,
(internal_ticket->t_unit_res *
internal_ticket->t_cnt));
retval = xlog_grant_log_space(log, internal_ticket);
@@ -584,8 +559,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
iclog->ic_state == XLOG_STATE_DIRTY)) {
if (!XLOG_FORCED_SHUTDOWN(log)) {
- sv_wait(&iclog->ic_force_wait, PMEM,
- &log->l_icloglock, s);
+ xlog_wait(&iclog->ic_force_wait,
+ &log->l_icloglock);
} else {
spin_unlock(&log->l_icloglock);
}
@@ -625,8 +600,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
|| iclog->ic_state == XLOG_STATE_DIRTY
|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
- sv_wait(&iclog->ic_force_wait, PMEM,
- &log->l_icloglock, s);
+ xlog_wait(&iclog->ic_force_wait,
+ &log->l_icloglock);
} else {
spin_unlock(&log->l_icloglock);
}
@@ -703,55 +678,46 @@ xfs_log_move_tail(xfs_mount_t *mp,
{
xlog_ticket_t *tic;
xlog_t *log = mp->m_log;
- int need_bytes, free_bytes, cycle, bytes;
+ int need_bytes, free_bytes;
if (XLOG_FORCED_SHUTDOWN(log))
return;
- if (tail_lsn == 0) {
- /* needed since sync_lsn is 64 bits */
- spin_lock(&log->l_icloglock);
- tail_lsn = log->l_last_sync_lsn;
- spin_unlock(&log->l_icloglock);
- }
-
- spin_lock(&log->l_grant_lock);
+ if (tail_lsn == 0)
+ tail_lsn = atomic64_read(&log->l_last_sync_lsn);
- /* Also an invalid lsn. 1 implies that we aren't passing in a valid
- * tail_lsn.
- */
- if (tail_lsn != 1) {
- log->l_tail_lsn = tail_lsn;
- }
+ /* tail_lsn == 1 implies that we weren't passed a valid value. */
+ if (tail_lsn != 1)
+ atomic64_set(&log->l_tail_lsn, tail_lsn);
- if ((tic = log->l_write_headq)) {
+ if (!list_empty_careful(&log->l_writeq)) {
#ifdef DEBUG
if (log->l_flags & XLOG_ACTIVE_RECOVERY)
panic("Recovery problem");
#endif
- cycle = log->l_grant_write_cycle;
- bytes = log->l_grant_write_bytes;
- free_bytes = xlog_space_left(log, cycle, bytes);
- do {
+ spin_lock(&log->l_grant_write_lock);
+ free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+ list_for_each_entry(tic, &log->l_writeq, t_queue) {
ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
if (free_bytes < tic->t_unit_res && tail_lsn != 1)
break;
tail_lsn = 0;
free_bytes -= tic->t_unit_res;
- sv_signal(&tic->t_wait);
- tic = tic->t_next;
- } while (tic != log->l_write_headq);
+ trace_xfs_log_regrant_write_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+ spin_unlock(&log->l_grant_write_lock);
}
- if ((tic = log->l_reserve_headq)) {
+
+ if (!list_empty_careful(&log->l_reserveq)) {
#ifdef DEBUG
if (log->l_flags & XLOG_ACTIVE_RECOVERY)
panic("Recovery problem");
#endif
- cycle = log->l_grant_reserve_cycle;
- bytes = log->l_grant_reserve_bytes;
- free_bytes = xlog_space_left(log, cycle, bytes);
- do {
+ spin_lock(&log->l_grant_reserve_lock);
+ free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ list_for_each_entry(tic, &log->l_reserveq, t_queue) {
if (tic->t_flags & XLOG_TIC_PERM_RESERV)
need_bytes = tic->t_unit_res*tic->t_cnt;
else
@@ -760,12 +726,12 @@ xfs_log_move_tail(xfs_mount_t *mp,
break;
tail_lsn = 0;
free_bytes -= need_bytes;
- sv_signal(&tic->t_wait);
- tic = tic->t_next;
- } while (tic != log->l_reserve_headq);
+ trace_xfs_log_grant_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+ spin_unlock(&log->l_grant_reserve_lock);
}
- spin_unlock(&log->l_grant_lock);
-} /* xfs_log_move_tail */
+}
/*
* Determine if we have a transaction that has gone to disk
@@ -831,23 +797,19 @@ xfs_log_need_covered(xfs_mount_t *mp)
* We may be holding the log iclog lock upon entering this routine.
*/
xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+ struct xfs_mount *mp)
{
- xfs_lsn_t tail_lsn;
- xlog_t *log = mp->m_log;
+ xfs_lsn_t tail_lsn;
+ struct log *log = mp->m_log;
tail_lsn = xfs_trans_ail_tail(mp->m_ail);
- spin_lock(&log->l_grant_lock);
- if (tail_lsn != 0) {
- log->l_tail_lsn = tail_lsn;
- } else {
- tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
- }
- spin_unlock(&log->l_grant_lock);
+ if (!tail_lsn)
+ tail_lsn = atomic64_read(&log->l_last_sync_lsn);
+ atomic64_set(&log->l_tail_lsn, tail_lsn);
return tail_lsn;
-} /* xlog_assign_tail_lsn */
-
+}
/*
* Return the space in the log between the tail and the head. The head
@@ -864,21 +826,26 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
* result is that we return the size of the log as the amount of space left.
*/
STATIC int
-xlog_space_left(xlog_t *log, int cycle, int bytes)
-{
- int free_bytes;
- int tail_bytes;
- int tail_cycle;
-
- tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
- tail_cycle = CYCLE_LSN(log->l_tail_lsn);
- if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
- free_bytes = log->l_logsize - (bytes - tail_bytes);
- } else if ((tail_cycle + 1) < cycle) {
+xlog_space_left(
+ struct log *log,
+ atomic64_t *head)
+{
+ int free_bytes;
+ int tail_bytes;
+ int tail_cycle;
+ int head_cycle;
+ int head_bytes;
+
+ xlog_crack_grant_head(head, &head_cycle, &head_bytes);
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+ tail_bytes = BBTOB(tail_bytes);
+ if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
+ free_bytes = log->l_logsize - (head_bytes - tail_bytes);
+ else if (tail_cycle + 1 < head_cycle)
return 0;
- } else if (tail_cycle < cycle) {
- ASSERT(tail_cycle == (cycle - 1));
- free_bytes = tail_bytes - bytes;
+ else if (tail_cycle < head_cycle) {
+ ASSERT(tail_cycle == (head_cycle - 1));
+ free_bytes = tail_bytes - head_bytes;
} else {
/*
* The reservation head is behind the tail.
@@ -889,12 +856,12 @@ xlog_space_left(xlog_t *log, int cycle, int bytes)
"xlog_space_left: head behind tail\n"
" tail_cycle = %d, tail_bytes = %d\n"
" GH cycle = %d, GH bytes = %d",
- tail_cycle, tail_bytes, cycle, bytes);
+ tail_cycle, tail_bytes, head_cycle, head_bytes);
ASSERT(0);
free_bytes = log->l_logsize;
}
return free_bytes;
-} /* xlog_space_left */
+}
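
A worked userspace version of the xlog_space_left() cases above (invented function, byte units throughout): the free space depends on whether the head is in the same cycle as the tail or has wrapped one cycle ahead of it.

#include <stdio.h>

static int space_left(int logsize, int tail_cycle, int tail_bytes,
		      int head_cycle, int head_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return logsize - (head_bytes - tail_bytes);
	if (tail_cycle + 1 < head_cycle)
		return 0;		/* corrupt: head lapped the tail */
	return tail_bytes - head_bytes;	/* head wrapped one cycle ahead */
}

int main(void)
{
	/* same cycle: head 300 bytes past the tail in a 4096-byte log */
	printf("%d\n", space_left(4096, 7, 100, 7, 400));	/* 3796 */
	/* head wrapped into cycle 8: only the gap up to the tail is free */
	printf("%d\n", space_left(4096, 7, 3000, 8, 1000));	/* 2000 */
	return 0;
}
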
/*
@@ -1047,12 +1014,16 @@ xlog_alloc_log(xfs_mount_t *mp,
log->l_flags |= XLOG_ACTIVE_RECOVERY;
log->l_prev_block = -1;
- log->l_tail_lsn = xlog_assign_lsn(1, 0);
/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
- log->l_last_sync_lsn = log->l_tail_lsn;
+ xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+ xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
- log->l_grant_reserve_cycle = 1;
- log->l_grant_write_cycle = 1;
+ xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
+ xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
+ INIT_LIST_HEAD(&log->l_reserveq);
+ INIT_LIST_HEAD(&log->l_writeq);
+ spin_lock_init(&log->l_grant_reserve_lock);
+ spin_lock_init(&log->l_grant_write_lock);
error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1094,8 +1065,7 @@ xlog_alloc_log(xfs_mount_t *mp,
log->l_xbuf = bp;
spin_lock_init(&log->l_icloglock);
- spin_lock_init(&log->l_grant_lock);
- sv_init(&log->l_flush_wait, 0, "flush_wait");
+ init_waitqueue_head(&log->l_flush_wait);
/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1151,8 +1121,8 @@ xlog_alloc_log(xfs_mount_t *mp,
ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
- sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
- sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+ init_waitqueue_head(&iclog->ic_force_wait);
+ init_waitqueue_head(&iclog->ic_write_wait);
iclogp = &iclog->ic_next;
}
@@ -1167,15 +1137,11 @@ xlog_alloc_log(xfs_mount_t *mp,
out_free_iclog:
for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
prev_iclog = iclog->ic_next;
- if (iclog->ic_bp) {
- sv_destroy(&iclog->ic_force_wait);
- sv_destroy(&iclog->ic_write_wait);
+ if (iclog->ic_bp)
xfs_buf_free(iclog->ic_bp);
- }
kmem_free(iclog);
}
spinlock_destroy(&log->l_icloglock);
- spinlock_destroy(&log->l_grant_lock);
xfs_buf_free(log->l_xbuf);
out_free_log:
kmem_free(log);
@@ -1223,61 +1189,60 @@ xlog_commit_record(
* water mark. In this manner, we would be creating a low water mark.
*/
STATIC void
-xlog_grant_push_ail(xfs_mount_t *mp,
- int need_bytes)
+xlog_grant_push_ail(
+ struct log *log,
+ int need_bytes)
{
- xlog_t *log = mp->m_log; /* pointer to the log */
- xfs_lsn_t tail_lsn; /* lsn of the log tail */
- xfs_lsn_t threshold_lsn = 0; /* lsn we'd like to be at */
- int free_blocks; /* free blocks left to write to */
- int free_bytes; /* free bytes left to write to */
- int threshold_block; /* block in lsn we'd like to be at */
- int threshold_cycle; /* lsn cycle we'd like to be at */
- int free_threshold;
-
- ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
-
- spin_lock(&log->l_grant_lock);
- free_bytes = xlog_space_left(log,
- log->l_grant_reserve_cycle,
- log->l_grant_reserve_bytes);
- tail_lsn = log->l_tail_lsn;
- free_blocks = BTOBBT(free_bytes);
-
- /*
- * Set the threshold for the minimum number of free blocks in the
- * log to the maximum of what the caller needs, one quarter of the
- * log, and 256 blocks.
- */
- free_threshold = BTOBB(need_bytes);
- free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
- free_threshold = MAX(free_threshold, 256);
- if (free_blocks < free_threshold) {
- threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
- threshold_cycle = CYCLE_LSN(tail_lsn);
+ xfs_lsn_t threshold_lsn = 0;
+ xfs_lsn_t last_sync_lsn;
+ int free_blocks;
+ int free_bytes;
+ int threshold_block;
+ int threshold_cycle;
+ int free_threshold;
+
+ ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+
+ free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ free_blocks = BTOBBT(free_bytes);
+
+ /*
+ * Set the threshold for the minimum number of free blocks in the
+ * log to the maximum of what the caller needs, one quarter of the
+ * log, and 256 blocks.
+ */
+ free_threshold = BTOBB(need_bytes);
+ free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+ free_threshold = MAX(free_threshold, 256);
+ if (free_blocks >= free_threshold)
+ return;
+
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+ &threshold_block);
+ threshold_block += free_threshold;
if (threshold_block >= log->l_logBBsize) {
- threshold_block -= log->l_logBBsize;
- threshold_cycle += 1;
+ threshold_block -= log->l_logBBsize;
+ threshold_cycle += 1;
}
- threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
+ threshold_lsn = xlog_assign_lsn(threshold_cycle,
+ threshold_block);
+ /*
+ * Don't pass in an lsn greater than the lsn of the last
+ * log record known to be on disk. Use a snapshot of the last sync lsn
+ * so that it doesn't change between the compare and the set.
+ */
+ last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+ if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+ threshold_lsn = last_sync_lsn;
- /* Don't pass in an lsn greater than the lsn of the last
- * log record known to be on disk.
+ /*
+ * Get the transaction layer to kick the dirty buffers out to
+ * disk asynchronously. No point in trying to do this if
+ * the filesystem is shutting down.
*/
- if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
- threshold_lsn = log->l_last_sync_lsn;
- }
- spin_unlock(&log->l_grant_lock);
-
- /*
- * Get the transaction layer to kick the dirty buffers out to
- * disk asynchronously. No point in trying to do this if
- * the filesystem is shutting down.
- */
- if (threshold_lsn &&
- !XLOG_FORCED_SHUTDOWN(log))
- xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-} /* xlog_grant_push_ail */
+ if (!XLOG_FORCED_SHUTDOWN(log))
+ xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
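
A short worked example of the push target computed above (invented helper; block units are 512-byte basic blocks): keep at least max(caller's need, a quarter of the log, 256 blocks) free ahead of the tail, wrapping the target block into the next cycle when it runs off the end of the log.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static void push_threshold(int log_blocks, int need_blocks,
			   int tail_cycle, int tail_block,
			   int *thr_cycle, int *thr_block)
{
	int free_threshold = MAX(need_blocks, MAX(log_blocks >> 2, 256));

	*thr_cycle = tail_cycle;
	*thr_block = tail_block + free_threshold;
	if (*thr_block >= log_blocks) {
		*thr_block -= log_blocks;
		*thr_cycle += 1;
	}
}

int main(void)
{
	int c, b;

	/* 2048-block log, tail at (cycle 3, block 1900): quarter = 512 */
	push_threshold(2048, 100, 3, 1900, &c, &b);
	printf("push to cycle %d, block %d\n", c, b); /* cycle 4, block 364 */
	return 0;
}
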
/*
* The bdstrat callback function for log bufs. This gives us a central
@@ -1372,9 +1337,8 @@ xlog_sync(xlog_t *log,
roundoff < BBTOB(1)));
/* move grant heads by roundoff in sync */
- spin_lock(&log->l_grant_lock);
- xlog_grant_add_space(log, roundoff);
- spin_unlock(&log->l_grant_lock);
+ xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
+ xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
/* put cycle number in every block */
xlog_pack_data(log, iclog, roundoff);
@@ -1489,15 +1453,12 @@ xlog_dealloc_log(xlog_t *log)
iclog = log->l_iclog;
for (i=0; i<log->l_iclog_bufs; i++) {
- sv_destroy(&iclog->ic_force_wait);
- sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
next_iclog = iclog->ic_next;
kmem_free(iclog);
iclog = next_iclog;
}
spinlock_destroy(&log->l_icloglock);
- spinlock_destroy(&log->l_grant_lock);
xfs_buf_free(log->l_xbuf);
log->l_mp->m_log = NULL;
@@ -2232,7 +2193,7 @@ xlog_state_do_callback(
lowest_lsn = xlog_get_lowest_lsn(log);
if (lowest_lsn &&
XFS_LSN_CMP(lowest_lsn,
- be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+ be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
iclog = iclog->ic_next;
continue; /* Leave this iclog for
* another thread */
@@ -2240,23 +2201,21 @@ xlog_state_do_callback(
iclog->ic_state = XLOG_STATE_CALLBACK;
- spin_unlock(&log->l_icloglock);
- /* l_last_sync_lsn field protected by
- * l_grant_lock. Don't worry about iclog's lsn.
- * No one else can be here except us.
+ /*
+ * update the last_sync_lsn before we drop the
+ * icloglock to ensure we are the only one that
+ * can update it.
*/
- spin_lock(&log->l_grant_lock);
- ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
- be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
- log->l_last_sync_lsn =
- be64_to_cpu(iclog->ic_header.h_lsn);
- spin_unlock(&log->l_grant_lock);
+ ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+ be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+ atomic64_set(&log->l_last_sync_lsn,
+ be64_to_cpu(iclog->ic_header.h_lsn));
- } else {
- spin_unlock(&log->l_icloglock);
+ } else
ioerrors++;
- }
+
+ spin_unlock(&log->l_icloglock);
/*
* Keep processing entries in the callback list until
@@ -2297,7 +2256,7 @@ xlog_state_do_callback(
xlog_state_clean_log(log);
/* wake up threads waiting in xfs_log_force() */
- sv_broadcast(&iclog->ic_force_wait);
+ wake_up_all(&iclog->ic_force_wait);
iclog = iclog->ic_next;
} while (first_iclog != iclog);
@@ -2344,7 +2303,7 @@ xlog_state_do_callback(
spin_unlock(&log->l_icloglock);
if (wake)
- sv_broadcast(&log->l_flush_wait);
+ wake_up_all(&log->l_flush_wait);
}
@@ -2395,7 +2354,7 @@ xlog_state_done_syncing(
* iclog buffer, we wake them all, one will get to do the
* I/O, the others get to wait for the result.
*/
- sv_broadcast(&iclog->ic_write_wait);
+ wake_up_all(&iclog->ic_write_wait);
spin_unlock(&log->l_icloglock);
xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
} /* xlog_state_done_syncing */
@@ -2444,7 +2403,7 @@ restart:
XFS_STATS_INC(xs_log_noiclogs);
/* Wait for log writes to have flushed */
- sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+ xlog_wait(&log->l_flush_wait, &log->l_icloglock);
goto restart;
}
@@ -2527,6 +2486,18 @@ restart:
*
* Once a ticket gets put onto the reserveq, it will only return after
* the needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock-free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off the reserveq under the
+ * l_grant_reserve_lock, we only need to take that lock if we are going
+ * to add the ticket to the queue and sleep. We can avoid taking the lock if the
+ * ticket was never added to the reserveq because the t_queue list head will be
+ * empty and we hold the only reference to it so it can safely be checked
+ * unlocked.
*/
STATIC int
xlog_grant_log_space(xlog_t *log,
@@ -2534,24 +2505,27 @@ xlog_grant_log_space(xlog_t *log,
{
int free_bytes;
int need_bytes;
-#ifdef DEBUG
- xfs_lsn_t tail_lsn;
-#endif
-
#ifdef DEBUG
if (log->l_flags & XLOG_ACTIVE_RECOVERY)
panic("grant Recovery problem");
#endif
- /* Is there space or do we need to sleep? */
- spin_lock(&log->l_grant_lock);
-
trace_xfs_log_grant_enter(log, tic);
+ need_bytes = tic->t_unit_res;
+ if (tic->t_flags & XFS_LOG_PERM_RESERV)
+ need_bytes *= tic->t_ocnt;
+
/* something is already sleeping; insert new transaction at end */
- if (log->l_reserve_headq) {
- xlog_ins_ticketq(&log->l_reserve_headq, tic);
+ if (!list_empty_careful(&log->l_reserveq)) {
+ spin_lock(&log->l_grant_reserve_lock);
+ /* recheck the queue now we are locked */
+ if (list_empty(&log->l_reserveq)) {
+ spin_unlock(&log->l_grant_reserve_lock);
+ goto redo;
+ }
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
trace_xfs_log_grant_sleep1(log, tic);
@@ -2563,72 +2537,57 @@ xlog_grant_log_space(xlog_t *log,
goto error_return;
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
/*
* If we got an error, and the filesystem is shutting down,
* we'll catch it down below. So just continue...
*/
trace_xfs_log_grant_wake1(log, tic);
- spin_lock(&log->l_grant_lock);
}
- if (tic->t_flags & XFS_LOG_PERM_RESERV)
- need_bytes = tic->t_unit_res*tic->t_ocnt;
- else
- need_bytes = tic->t_unit_res;
redo:
if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
+ goto error_return_unlocked;
- free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
- log->l_grant_reserve_bytes);
+ free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
if (free_bytes < need_bytes) {
- if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- xlog_ins_ticketq(&log->l_reserve_headq, tic);
+ spin_lock(&log->l_grant_reserve_lock);
+ if (list_empty(&tic->t_queue))
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
trace_xfs_log_grant_sleep2(log, tic);
- spin_unlock(&log->l_grant_lock);
- xlog_grant_push_ail(log->l_mp, need_bytes);
- spin_lock(&log->l_grant_lock);
-
- XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
- spin_lock(&log->l_grant_lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- trace_xfs_log_grant_wake2(log, tic);
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+ trace_xfs_log_grant_wake2(log, tic);
goto redo;
- } else if (tic->t_flags & XLOG_TIC_IN_Q)
- xlog_del_ticketq(&log->l_reserve_headq, tic);
+ }
- /* we've got enough space */
- xlog_grant_add_space(log, need_bytes);
-#ifdef DEBUG
- tail_lsn = log->l_tail_lsn;
- /*
- * Check to make sure the grant write head didn't just over lap the
- * tail. If the cycles are the same, we can't be overlapping.
- * Otherwise, make sure that the cycles differ by exactly one and
- * check the byte count.
- */
- if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
- ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
- ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+ if (!list_empty(&tic->t_queue)) {
+ spin_lock(&log->l_grant_reserve_lock);
+ list_del_init(&tic->t_queue);
+ spin_unlock(&log->l_grant_reserve_lock);
}
-#endif
+
+ /* we've got enough space */
+ xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
+ xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_grant_exit(log, tic);
- xlog_verify_grant_head(log, 1);
- spin_unlock(&log->l_grant_lock);
+ xlog_verify_grant_tail(log);
return 0;
- error_return:
- if (tic->t_flags & XLOG_TIC_IN_Q)
- xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+error_return_unlocked:
+ spin_lock(&log->l_grant_reserve_lock);
+error_return:
+ list_del_init(&tic->t_queue);
+ spin_unlock(&log->l_grant_reserve_lock);
trace_xfs_log_grant_error(log, tic);
/*
@@ -2638,7 +2597,6 @@ redo:
*/
tic->t_curr_res = 0;
tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- spin_unlock(&log->l_grant_lock);
return XFS_ERROR(EIO);
} /* xlog_grant_log_space */
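The lock-free fast path described in the comment above reduces to a check-then-lock-then-recheck pattern. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the names are hypothetical, a pthread mutex stands in for l_grant_reserve_lock, and a plain flag models list_empty(&log->l_reserveq).

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool queue_empty = true;   /* models list_empty(&log->l_reserveq) */

    static bool need_to_queue(void)
    {
        /* unlocked peek, like list_empty_careful(): the cheap common case */
        if (queue_empty)
            return false;

        pthread_mutex_lock(&reserve_lock);
        /* recheck under the lock: the queue may have drained meanwhile */
        if (queue_empty) {
            pthread_mutex_unlock(&reserve_lock);
            return false;
        }
        /* ...add the ticket to the queue and sleep, as the code above does... */
        pthread_mutex_unlock(&reserve_lock);
        return true;
    }

Correctness never depends on the unlocked peek; only the fast path does. Any decision that matters is re-made once the lock is held.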
@@ -2646,17 +2604,14 @@ redo:
/*
* Replenish the byte reservation required by moving the grant write head.
*
- *
+ * Similar to xlog_grant_log_space, the function is structured to have a
+ * lock-free fast path.
*/
STATIC int
xlog_regrant_write_log_space(xlog_t *log,
xlog_ticket_t *tic)
{
int free_bytes, need_bytes;
- xlog_ticket_t *ntic;
-#ifdef DEBUG
- xfs_lsn_t tail_lsn;
-#endif
tic->t_curr_res = tic->t_unit_res;
xlog_tic_reset_res(tic);
@@ -2669,12 +2624,9 @@ xlog_regrant_write_log_space(xlog_t *log,
panic("regrant Recovery problem");
#endif
- spin_lock(&log->l_grant_lock);
-
trace_xfs_log_regrant_write_enter(log, tic);
-
if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
+ goto error_return_unlocked;
/* If there are other waiters on the queue then give them a
* chance at logspace before us. Wake up the first waiters,
@@ -2683,92 +2635,76 @@ xlog_regrant_write_log_space(xlog_t *log,
* this transaction.
*/
need_bytes = tic->t_unit_res;
- if ((ntic = log->l_write_headq)) {
- free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
- log->l_grant_write_bytes);
- do {
+ if (!list_empty_careful(&log->l_writeq)) {
+ struct xlog_ticket *ntic;
+
+ spin_lock(&log->l_grant_write_lock);
+ free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+ list_for_each_entry(ntic, &log->l_writeq, t_queue) {
ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
if (free_bytes < ntic->t_unit_res)
break;
free_bytes -= ntic->t_unit_res;
- sv_signal(&ntic->t_wait);
- ntic = ntic->t_next;
- } while (ntic != log->l_write_headq);
-
- if (ntic != log->l_write_headq) {
- if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- xlog_ins_ticketq(&log->l_write_headq, tic);
+ wake_up(&ntic->t_wait);
+ }
+ if (ntic != list_first_entry(&log->l_writeq,
+ struct xlog_ticket, t_queue)) {
+ if (list_empty(&tic->t_queue))
+ list_add_tail(&tic->t_queue, &log->l_writeq);
trace_xfs_log_regrant_write_sleep1(log, tic);
- spin_unlock(&log->l_grant_lock);
- xlog_grant_push_ail(log->l_mp, need_bytes);
- spin_lock(&log->l_grant_lock);
+ xlog_grant_push_ail(log, need_bytes);
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_wait, PINOD|PLTWAIT,
- &log->l_grant_lock, s);
-
- /* If we're shutting down, this tic is already
- * off the queue */
- spin_lock(&log->l_grant_lock);
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
trace_xfs_log_regrant_write_wake1(log, tic);
- }
+ } else
+ spin_unlock(&log->l_grant_write_lock);
}
redo:
if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
+ goto error_return_unlocked;
- free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
- log->l_grant_write_bytes);
+ free_bytes = xlog_space_left(log, &log->l_grant_write_head);
if (free_bytes < need_bytes) {
- if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- xlog_ins_ticketq(&log->l_write_headq, tic);
- spin_unlock(&log->l_grant_lock);
- xlog_grant_push_ail(log->l_mp, need_bytes);
- spin_lock(&log->l_grant_lock);
-
- XFS_STATS_INC(xs_sleep_logspace);
- trace_xfs_log_regrant_write_sleep2(log, tic);
-
- sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+ spin_lock(&log->l_grant_write_lock);
+ if (list_empty(&tic->t_queue))
+ list_add_tail(&tic->t_queue, &log->l_writeq);
- /* If we're shutting down, this tic is already off the queue */
- spin_lock(&log->l_grant_lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep2(log, tic);
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+
trace_xfs_log_regrant_write_wake2(log, tic);
goto redo;
- } else if (tic->t_flags & XLOG_TIC_IN_Q)
- xlog_del_ticketq(&log->l_write_headq, tic);
+ }
- /* we've got enough space */
- xlog_grant_add_space_write(log, need_bytes);
-#ifdef DEBUG
- tail_lsn = log->l_tail_lsn;
- if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
- ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
- ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+ if (!list_empty(&tic->t_queue)) {
+ spin_lock(&log->l_grant_write_lock);
+ list_del_init(&tic->t_queue);
+ spin_unlock(&log->l_grant_write_lock);
}
-#endif
+ /* we've got enough space */
+ xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
-
- xlog_verify_grant_head(log, 1);
- spin_unlock(&log->l_grant_lock);
+ xlog_verify_grant_tail(log);
return 0;
+ error_return_unlocked:
+ spin_lock(&log->l_grant_write_lock);
error_return:
- if (tic->t_flags & XLOG_TIC_IN_Q)
- xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+ list_del_init(&tic->t_queue);
+ spin_unlock(&log->l_grant_write_lock);
trace_xfs_log_regrant_write_error(log, tic);
/*
@@ -2778,7 +2714,6 @@ redo:
*/
tic->t_curr_res = 0;
tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- spin_unlock(&log->l_grant_lock);
return XFS_ERROR(EIO);
} /* xlog_regrant_write_log_space */
@@ -2799,27 +2734,24 @@ xlog_regrant_reserve_log_space(xlog_t *log,
if (ticket->t_cnt > 0)
ticket->t_cnt--;
- spin_lock(&log->l_grant_lock);
- xlog_grant_sub_space(log, ticket->t_curr_res);
+ xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+ ticket->t_curr_res);
+ xlog_grant_sub_space(log, &log->l_grant_write_head,
+ ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
trace_xfs_log_regrant_reserve_sub(log, ticket);
- xlog_verify_grant_head(log, 1);
-
/* just return if we still have some of the pre-reserved space */
- if (ticket->t_cnt > 0) {
- spin_unlock(&log->l_grant_lock);
+ if (ticket->t_cnt > 0)
return;
- }
- xlog_grant_add_space_reserve(log, ticket->t_unit_res);
+ xlog_grant_add_space(log, &log->l_grant_reserve_head,
+ ticket->t_unit_res);
trace_xfs_log_regrant_reserve_exit(log, ticket);
- xlog_verify_grant_head(log, 0);
- spin_unlock(&log->l_grant_lock);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
} /* xlog_regrant_reserve_log_space */
@@ -2843,28 +2775,29 @@ STATIC void
xlog_ungrant_log_space(xlog_t *log,
xlog_ticket_t *ticket)
{
+ int bytes;
+
if (ticket->t_cnt > 0)
ticket->t_cnt--;
- spin_lock(&log->l_grant_lock);
trace_xfs_log_ungrant_enter(log, ticket);
-
- xlog_grant_sub_space(log, ticket->t_curr_res);
-
trace_xfs_log_ungrant_sub(log, ticket);
- /* If this is a permanent reservation ticket, we may be able to free
+ /*
+ * If this is a permanent reservation ticket, we may be able to free
* up more space based on the remaining count.
*/
+ bytes = ticket->t_curr_res;
if (ticket->t_cnt > 0) {
ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
- xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
+ bytes += ticket->t_unit_res*ticket->t_cnt;
}
+ xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
+ xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+
trace_xfs_log_ungrant_exit(log, ticket);
- xlog_verify_grant_head(log, 1);
- spin_unlock(&log->l_grant_lock);
xfs_log_move_tail(log->l_mp, 1);
} /* xlog_ungrant_log_space */
@@ -2901,11 +2834,11 @@ xlog_state_release_iclog(
if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
/* update tail before writing to iclog */
- xlog_assign_tail_lsn(log->l_mp);
+ xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
sync++;
iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
- xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog, tail_lsn);
/* cycle incremented when incrementing curr_block */
}
spin_unlock(&log->l_icloglock);
@@ -3088,7 +3021,7 @@ maybe_sleep:
return XFS_ERROR(EIO);
}
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
/*
* No need to grab the log lock here since we're
* only deciding whether or not to return EIO
@@ -3206,8 +3139,8 @@ try_again:
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_prev->ic_write_wait,
- PSWP, &log->l_icloglock, s);
+ xlog_wait(&iclog->ic_prev->ic_write_wait,
+ &log->l_icloglock);
if (log_flushed)
*log_flushed = 1;
already_slept = 1;
@@ -3235,7 +3168,7 @@ try_again:
return XFS_ERROR(EIO);
}
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
/*
* No need to grab the log lock here since we're
* only deciding whether or not to return EIO
@@ -3310,10 +3243,8 @@ xfs_log_ticket_put(
xlog_ticket_t *ticket)
{
ASSERT(atomic_read(&ticket->t_ref) > 0);
- if (atomic_dec_and_test(&ticket->t_ref)) {
- sv_destroy(&ticket->t_wait);
+ if (atomic_dec_and_test(&ticket->t_ref))
kmem_zone_free(xfs_log_ticket_zone, ticket);
- }
}
xlog_ticket_t *
@@ -3435,6 +3366,7 @@ xlog_ticket_alloc(
}
atomic_set(&tic->t_ref, 1);
+ INIT_LIST_HEAD(&tic->t_queue);
tic->t_unit_res = unit_bytes;
tic->t_curr_res = unit_bytes;
tic->t_cnt = cnt;
@@ -3445,7 +3377,7 @@ xlog_ticket_alloc(
tic->t_trans_type = 0;
if (xflags & XFS_LOG_PERM_RESERV)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
- sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+ init_waitqueue_head(&tic->t_wait);
xlog_tic_reset_res(tic);
@@ -3484,18 +3416,25 @@ xlog_verify_dest_ptr(
}
STATIC void
-xlog_verify_grant_head(xlog_t *log, int equals)
+xlog_verify_grant_tail(
+ struct log *log)
{
- if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
- if (equals)
- ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
- else
- ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
- } else {
- ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
- ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
- }
-} /* xlog_verify_grant_head */
+ int tail_cycle, tail_blocks;
+ int cycle, space;
+
+ /*
+ * Check to make sure the grant write head didn't just overlap the
+ * tail. If the cycles are the same, we can't be overlapping.
+ * Otherwise, make sure that the cycles differ by exactly one and
+ * check the byte count.
+ */
+ xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+ if (tail_cycle != cycle) {
+ ASSERT(cycle - 1 == tail_cycle);
+ ASSERT(space <= BBTOB(tail_blocks));
+ }
+}
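As a worked example of the assertions above: with the tail at cycle 5, block 100, a write head still in cycle 5 cannot have overlapped the tail by definition; a write head in cycle 6 is consistent only while its byte count stays at or below BBTOB(100), i.e. it has wrapped around the log but not yet caught up to the tail block.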
/* check if it will fit */
STATIC void
@@ -3716,12 +3655,10 @@ xfs_log_force_umount(
xlog_cil_force(log);
/*
- * We must hold both the GRANT lock and the LOG lock,
- * before we mark the filesystem SHUTDOWN and wake
- * everybody up to tell the bad news.
+ * Mark the filesystem and the log as being in a shutdown state and wake
+ * everybody up to tell them the bad news.
*/
spin_lock(&log->l_icloglock);
- spin_lock(&log->l_grant_lock);
mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
if (mp->m_sb_bp)
XFS_BUF_DONE(mp->m_sb_bp);
@@ -3742,27 +3679,21 @@ xfs_log_force_umount(
spin_unlock(&log->l_icloglock);
/*
- * We don't want anybody waiting for log reservations
- * after this. That means we have to wake up everybody
- * queued up on reserve_headq as well as write_headq.
- * In addition, we make sure in xlog_{re}grant_log_space
- * that we don't enqueue anything once the SHUTDOWN flag
- * is set, and this action is protected by the GRANTLOCK.
+ * We don't want anybody waiting for log reservations after this. That
+ * means we have to wake up everybody queued up on reserveq as well as
+ * writeq. In addition, we make sure in xlog_{re}grant_log_space that
+ * we don't enqueue anything once the SHUTDOWN flag is set, and this
+ * action is protected by the grant locks.
*/
- if ((tic = log->l_reserve_headq)) {
- do {
- sv_signal(&tic->t_wait);
- tic = tic->t_next;
- } while (tic != log->l_reserve_headq);
- }
-
- if ((tic = log->l_write_headq)) {
- do {
- sv_signal(&tic->t_wait);
- tic = tic->t_next;
- } while (tic != log->l_write_headq);
- }
- spin_unlock(&log->l_grant_lock);
+ spin_lock(&log->l_grant_reserve_lock);
+ list_for_each_entry(tic, &log->l_reserveq, t_queue)
+ wake_up(&tic->t_wait);
+ spin_unlock(&log->l_grant_reserve_lock);
+
+ spin_lock(&log->l_grant_write_lock);
+ list_for_each_entry(tic, &log->l_writeq, t_queue)
+ wake_up(&tic->t_wait);
+ spin_unlock(&log->l_grant_write_lock);
if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
ASSERT(!logerror);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb5e97..9dc8125d04e 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -61,7 +61,7 @@ xlog_cil_init(
INIT_LIST_HEAD(&cil->xc_committing);
spin_lock_init(&cil->xc_cil_lock);
init_rwsem(&cil->xc_ctx_lock);
- sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
+ init_waitqueue_head(&cil->xc_commit_wait);
INIT_LIST_HEAD(&ctx->committing);
INIT_LIST_HEAD(&ctx->busy_extents);
@@ -361,15 +361,10 @@ xlog_cil_committed(
int abort)
{
struct xfs_cil_ctx *ctx = args;
- struct xfs_log_vec *lv;
- int abortflag = abort ? XFS_LI_ABORTED : 0;
struct xfs_busy_extent *busyp, *n;
- /* unpin all the log items */
- for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
- xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
- abortflag);
- }
+ xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+ ctx->start_lsn, abort);
list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
@@ -568,7 +563,7 @@ restart:
* It is still being pushed! Wait for the push to
* complete, then start again from the beginning.
*/
- sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+ xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
goto restart;
}
}
@@ -592,7 +587,7 @@ restart:
*/
spin_lock(&cil->xc_cil_lock);
ctx->commit_lsn = commit_lsn;
- sv_broadcast(&cil->xc_commit_wait);
+ wake_up_all(&cil->xc_commit_wait);
spin_unlock(&cil->xc_cil_lock);
/* release the hounds! */
@@ -757,7 +752,7 @@ restart:
* It is still being pushed! Wait for the push to
* complete, then start again from the beginning.
*/
- sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+ xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
goto restart;
}
if (ctx->sequence != sequence)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index edcdfe01617..d5f8be8f4bf 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -21,7 +21,6 @@
struct xfs_buf;
struct log;
struct xlog_ticket;
-struct xfs_buf_cancel;
struct xfs_mount;
/*
@@ -54,7 +53,6 @@ struct xfs_mount;
BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
-
static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
{
return ((xfs_lsn_t)cycle << 32) | block;
@@ -133,12 +131,10 @@ static inline uint xlog_get_client_id(__be32 i)
*/
#define XLOG_TIC_INITED 0x1 /* has been initialized */
#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */
-#define XLOG_TIC_IN_Q 0x4
#define XLOG_TIC_FLAGS \
{ XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \
- { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \
- { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" }
+ { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }
#endif /* __KERNEL__ */
@@ -244,9 +240,8 @@ typedef struct xlog_res {
} xlog_res_t;
typedef struct xlog_ticket {
- sv_t t_wait; /* ticket wait queue : 20 */
- struct xlog_ticket *t_next; /* :4|8 */
- struct xlog_ticket *t_prev; /* :4|8 */
+ wait_queue_head_t t_wait; /* ticket wait queue */
+ struct list_head t_queue; /* reserve/write queue */
xlog_tid_t t_tid; /* transaction identifier : 4 */
atomic_t t_ref; /* ticket reference count : 4 */
int t_curr_res; /* current reservation in bytes : 4 */
@@ -353,8 +348,8 @@ typedef union xlog_in_core2 {
* and move everything else out to subsequent cachelines.
*/
typedef struct xlog_in_core {
- sv_t ic_force_wait;
- sv_t ic_write_wait;
+ wait_queue_head_t ic_force_wait;
+ wait_queue_head_t ic_write_wait;
struct xlog_in_core *ic_next;
struct xlog_in_core *ic_prev;
struct xfs_buf *ic_bp;
@@ -421,7 +416,7 @@ struct xfs_cil {
struct xfs_cil_ctx *xc_ctx;
struct rw_semaphore xc_ctx_lock;
struct list_head xc_committing;
- sv_t xc_commit_wait;
+ wait_queue_head_t xc_commit_wait;
xfs_lsn_t xc_current_sequence;
};
@@ -491,7 +486,7 @@ typedef struct log {
struct xfs_buftarg *l_targ; /* buftarg of log */
uint l_flags;
uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
- struct xfs_buf_cancel **l_buf_cancel_table;
+ struct list_head *l_buf_cancel_table;
int l_iclog_hsize; /* size of iclog header */
int l_iclog_heads; /* # of iclog header sectors */
uint l_sectBBsize; /* sector size in BBs (2^n) */
@@ -503,29 +498,40 @@ typedef struct log {
int l_logBBsize; /* size of log in BB chunks */
/* The following block of fields are changed while holding icloglock */
- sv_t l_flush_wait ____cacheline_aligned_in_smp;
+ wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp;
/* waiting for iclog flush */
int l_covered_state;/* state of "covering disk
* log entries" */
xlog_in_core_t *l_iclog; /* head log queue */
spinlock_t l_icloglock; /* grab to change iclog state */
- xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed
- * buffers */
- xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */
int l_curr_cycle; /* Cycle number of log writes */
int l_prev_cycle; /* Cycle number before last
* block increment */
int l_curr_block; /* current logical log block */
int l_prev_block; /* previous logical log block */
- /* The following block of fields are changed while holding grant_lock */
- spinlock_t l_grant_lock ____cacheline_aligned_in_smp;
- xlog_ticket_t *l_reserve_headq;
- xlog_ticket_t *l_write_headq;
- int l_grant_reserve_cycle;
- int l_grant_reserve_bytes;
- int l_grant_write_cycle;
- int l_grant_write_bytes;
+ /*
+ * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
+ * read without needing to hold specific locks. To avoid operations
+ * contending with other hot objects, place each of them on a separate
+ * cacheline.
+ */
+ /* lsn of last LR on disk */
+ atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
+ /* lsn of 1st LR with unflushed buffers */
+ atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
+
+ /*
+ * ticket grant locks, queues and accounting have their own cachelines
+ * as these are quite hot and can be operated on concurrently.
+ */
+ spinlock_t l_grant_reserve_lock ____cacheline_aligned_in_smp;
+ struct list_head l_reserveq;
+ atomic64_t l_grant_reserve_head;
+
+ spinlock_t l_grant_write_lock ____cacheline_aligned_in_smp;
+ struct list_head l_writeq;
+ atomic64_t l_grant_write_head;
/* The following field are used for debugging; need to hold icloglock */
#ifdef DEBUG
@@ -534,6 +540,9 @@ typedef struct log {
} xlog_t;
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+ ((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
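For illustration, assuming the existing XLOG_BC_TABLE_SIZE of 64: block numbers 7, 71 and 135 all hash to bucket 7, and since each bucket is now a list_head, colliding cancel records are simply chained on that bucket's list.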
#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
/* common routines */
@@ -562,6 +571,61 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
xlog_in_core_t **commit_iclog, uint flags);
/*
+ * When we crack an atomic LSN, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from. This should always
+ * be used to sample and crack LSNs that are stored and updated in atomic
+ * variables.
+ */
+static inline void
+xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
+{
+ xfs_lsn_t val = atomic64_read(lsn);
+
+ *cycle = CYCLE_LSN(val);
+ *block = BLOCK_LSN(val);
+}
+
+/*
+ * Calculate and assign a value to an atomic LSN variable from component pieces.
+ */
+static inline void
+xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
+{
+ atomic64_set(lsn, xlog_assign_lsn(cycle, block));
+}
+
+/*
+ * When we crack the grant head, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from.
+ */
+static inline void
+xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
+{
+ *cycle = val >> 32;
+ *space = val & 0xffffffff;
+}
+
+static inline void
+xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
+{
+ xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
+}
+
+static inline int64_t
+xlog_assign_grant_head_val(int cycle, int space)
+{
+ return ((int64_t)cycle << 32) | space;
+}
+
+static inline void
+xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+{
+ atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
+}
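Because cycle and space are packed into a single 64-bit value and sampled with one atomic64_read(), a reader always sees a mutually consistent pair even while another CPU updates the head. A standalone sketch of the pack/crack round trip these helpers implement (plain int64_t in place of atomic64_t, for brevity):

    #include <assert.h>
    #include <stdint.h>

    /* cycle in the high 32 bits, space in the low 32 bits */
    static int64_t pack_head(int cycle, int space)
    {
        return ((int64_t)cycle << 32) | space;
    }

    static void crack_head(int64_t val, int *cycle, int *space)
    {
        *cycle = val >> 32;
        *space = val & 0xffffffff;
    }

    int main(void)
    {
        int cycle, space;

        crack_head(pack_head(7, 4096), &cycle, &space);
        assert(cycle == 7 && space == 4096);
        return 0;
    }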
+
+/*
* Committed Item List interfaces
*/
int xlog_cil_init(struct log *log);
@@ -585,6 +649,21 @@ xlog_cil_force(struct log *log)
*/
#define XLOG_UNMOUNT_REC_TYPE (-1U)
+/*
+ * Wrapper function for waiting on a wait queue serialised against wakeups
+ * by a spinlock. This matches the semantics of all the wait queues used in the
+ * log code.
+ */
+static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(wq, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+}
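Note the contract this wrapper imposes: it must be entered with the spinlock held and it returns with the lock dropped, which is why every caller above loops back (goto redo / goto restart) and re-evaluates its state from scratch. A rough userspace analogue of that caller protocol, using pthreads (a condition variable re-acquires the mutex on wakeup, so the sketch drops it again to match xlog_wait() semantics; names are hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t grant_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  ticket_wq  = PTHREAD_COND_INITIALIZER;
    static bool enough_space;     /* models xlog_space_left() >= need_bytes */

    static void wait_for_space(void)
    {
    redo:
        if (!enough_space) {
            pthread_mutex_lock(&grant_lock);
            if (!enough_space) {
                /* pthread_cond_wait drops the mutex while blocked */
                pthread_cond_wait(&ticket_wq, &grant_lock);
                /* xlog_wait() returns unlocked, so mimic that here */
                pthread_mutex_unlock(&grant_lock);
                goto redo;    /* re-evaluate everything after waking */
            }
            pthread_mutex_unlock(&grant_lock);
        }
        /* ...enough space: move the grant heads and return... */
    }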
#endif /* __KERNEL__ */
#endif /* __XFS_LOG_PRIV_H__ */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 966d3f97458..204d8e5fa7f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@ STATIC void xlog_recover_check_summary(xlog_t *);
#endif
/*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+ xfs_daddr_t bc_blkno;
+ uint bc_len;
+ int bc_refcount;
+ struct list_head bc_list;
+};
+
+/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
@@ -925,12 +936,12 @@ xlog_find_tail(
log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
if (found == 2)
log->l_curr_cycle++;
- log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
- log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
- log->l_grant_reserve_cycle = log->l_curr_cycle;
- log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
- log->l_grant_write_cycle = log->l_curr_cycle;
- log->l_grant_write_bytes = BBTOB(log->l_curr_block);
+ atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+ atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+ xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
+ BBTOB(log->l_curr_block));
+ xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
+ BBTOB(log->l_curr_block));
/*
* Look for unmount record. If we find it, then we know there
@@ -960,7 +971,7 @@ xlog_find_tail(
}
after_umount_blk = (i + hblks + (int)
BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
- tail_lsn = log->l_tail_lsn;
+ tail_lsn = atomic64_read(&log->l_tail_lsn);
if (*head_blk == after_umount_blk &&
be32_to_cpu(rhead->h_num_logops) == 1) {
umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -975,12 +986,10 @@ xlog_find_tail(
* log records will point recovery to after the
* current unmount record.
*/
- log->l_tail_lsn =
- xlog_assign_lsn(log->l_curr_cycle,
- after_umount_blk);
- log->l_last_sync_lsn =
- xlog_assign_lsn(log->l_curr_cycle,
- after_umount_blk);
+ xlog_assign_atomic_lsn(&log->l_tail_lsn,
+ log->l_curr_cycle, after_umount_blk);
+ xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+ log->l_curr_cycle, after_umount_blk);
*tail_blk = after_umount_blk;
/*
@@ -1605,82 +1614,45 @@ xlog_recover_reorder_trans(
* record in the table to tell us how many times we expect to see this
* record during the second pass.
*/
-STATIC void
-xlog_recover_do_buffer_pass1(
- xlog_t *log,
- xfs_buf_log_format_t *buf_f)
+STATIC int
+xlog_recover_buffer_pass1(
+ struct log *log,
+ xlog_recover_item_t *item)
{
- xfs_buf_cancel_t *bcp;
- xfs_buf_cancel_t *nextp;
- xfs_buf_cancel_t *prevp;
- xfs_buf_cancel_t **bucket;
- xfs_daddr_t blkno = 0;
- uint len = 0;
- ushort flags = 0;
-
- switch (buf_f->blf_type) {
- case XFS_LI_BUF:
- blkno = buf_f->blf_blkno;
- len = buf_f->blf_len;
- flags = buf_f->blf_flags;
- break;
- }
+ xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
+ struct list_head *bucket;
+ struct xfs_buf_cancel *bcp;
/*
* If this isn't a cancel buffer item, then just return.
*/
- if (!(flags & XFS_BLF_CANCEL)) {
+ if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
trace_xfs_log_recover_buf_not_cancel(log, buf_f);
- return;
- }
-
- /*
- * Insert an xfs_buf_cancel record into the hash table of
- * them. If there is already an identical record, bump
- * its reference count.
- */
- bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
- XLOG_BC_TABLE_SIZE];
- /*
- * If the hash bucket is empty then just insert a new record into
- * the bucket.
- */
- if (*bucket == NULL) {
- bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
- KM_SLEEP);
- bcp->bc_blkno = blkno;
- bcp->bc_len = len;
- bcp->bc_refcount = 1;
- bcp->bc_next = NULL;
- *bucket = bcp;
- return;
+ return 0;
}
/*
- * The hash bucket is not empty, so search for duplicates of our
- * record. If we find one them just bump its refcount. If not
- * then add us at the end of the list.
+ * Insert an xfs_buf_cancel record into the hash table of cancelled records.
+ * If there is already an identical record, bump its reference count.
*/
- prevp = NULL;
- nextp = *bucket;
- while (nextp != NULL) {
- if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
- nextp->bc_refcount++;
+ bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
+ list_for_each_entry(bcp, bucket, bc_list) {
+ if (bcp->bc_blkno == buf_f->blf_blkno &&
+ bcp->bc_len == buf_f->blf_len) {
+ bcp->bc_refcount++;
trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
- return;
+ return 0;
}
- prevp = nextp;
- nextp = nextp->bc_next;
- }
- ASSERT(prevp != NULL);
- bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
- KM_SLEEP);
- bcp->bc_blkno = blkno;
- bcp->bc_len = len;
+ }
+
+ bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+ bcp->bc_blkno = buf_f->blf_blkno;
+ bcp->bc_len = buf_f->blf_len;
bcp->bc_refcount = 1;
- bcp->bc_next = NULL;
- prevp->bc_next = bcp;
+ list_add_tail(&bcp->bc_list, bucket);
+
trace_xfs_log_recover_buf_cancel_add(log, buf_f);
+ return 0;
}
/*
@@ -1698,14 +1670,13 @@ xlog_recover_do_buffer_pass1(
*/
STATIC int
xlog_check_buffer_cancelled(
- xlog_t *log,
+ struct log *log,
xfs_daddr_t blkno,
uint len,
ushort flags)
{
- xfs_buf_cancel_t *bcp;
- xfs_buf_cancel_t *prevp;
- xfs_buf_cancel_t **bucket;
+ struct list_head *bucket;
+ struct xfs_buf_cancel *bcp;
if (log->l_buf_cancel_table == NULL) {
/*
@@ -1716,128 +1687,70 @@ xlog_check_buffer_cancelled(
return 0;
}
- bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
- XLOG_BC_TABLE_SIZE];
- bcp = *bucket;
- if (bcp == NULL) {
- /*
- * There is no corresponding entry in the table built
- * in pass one, so this buffer has not been cancelled.
- */
- ASSERT(!(flags & XFS_BLF_CANCEL));
- return 0;
- }
-
/*
- * Search for an entry in the buffer cancel table that
- * matches our buffer.
+ * Search for an entry in the cancel table that matches our buffer.
*/
- prevp = NULL;
- while (bcp != NULL) {
- if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
- /*
- * We've go a match, so return 1 so that the
- * recovery of this buffer is cancelled.
- * If this buffer is actually a buffer cancel
- * log item, then decrement the refcount on the
- * one in the table and remove it if this is the
- * last reference.
- */
- if (flags & XFS_BLF_CANCEL) {
- bcp->bc_refcount--;
- if (bcp->bc_refcount == 0) {
- if (prevp == NULL) {
- *bucket = bcp->bc_next;
- } else {
- prevp->bc_next = bcp->bc_next;
- }
- kmem_free(bcp);
- }
- }
- return 1;
- }
- prevp = bcp;
- bcp = bcp->bc_next;
+ bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+ list_for_each_entry(bcp, bucket, bc_list) {
+ if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+ goto found;
}
+
/*
- * We didn't find a corresponding entry in the table, so
- * return 0 so that the buffer is NOT cancelled.
+ * We didn't find a corresponding entry in the table, so return 0 so
+ * that the buffer is NOT cancelled.
*/
ASSERT(!(flags & XFS_BLF_CANCEL));
return 0;
-}
-STATIC int
-xlog_recover_do_buffer_pass2(
- xlog_t *log,
- xfs_buf_log_format_t *buf_f)
-{
- xfs_daddr_t blkno = 0;
- ushort flags = 0;
- uint len = 0;
-
- switch (buf_f->blf_type) {
- case XFS_LI_BUF:
- blkno = buf_f->blf_blkno;
- flags = buf_f->blf_flags;
- len = buf_f->blf_len;
- break;
+found:
+ /*
+ * We've got a match, so return 1 so that the recovery of this buffer
+ * is cancelled. If this buffer is actually a buffer cancel log
+ * item, then decrement the refcount on the one in the table and
+ * remove it if this is the last reference.
+ */
+ if (flags & XFS_BLF_CANCEL) {
+ if (--bcp->bc_refcount == 0) {
+ list_del(&bcp->bc_list);
+ kmem_free(bcp);
+ }
}
-
- return xlog_check_buffer_cancelled(log, blkno, len, flags);
+ return 1;
}
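As a worked example of the refcounting: a buffer cancelled twice within the recovered range ends pass 1 with bc_refcount == 2; each cancel item replayed in pass 2 takes the found path above and drops the count by one, and only the final reference removes the record from its bucket and frees it.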
/*
- * Perform recovery for a buffer full of inodes. In these buffers,
- * the only data which should be recovered is that which corresponds
- * to the di_next_unlinked pointers in the on disk inode structures.
- * The rest of the data for the inodes is always logged through the
- * inodes themselves rather than the inode buffer and is recovered
- * in xlog_recover_do_inode_trans().
+ * Perform recovery for a buffer full of inodes. In these buffers, the only
+ * data which should be recovered is that which corresponds to the
+ * di_next_unlinked pointers in the on disk inode structures. The rest of the
+ * data for the inodes is always logged through the inodes themselves rather
+ * than the inode buffer and is recovered in xlog_recover_inode_pass2().
*
- * The only time when buffers full of inodes are fully recovered is
- * when the buffer is full of newly allocated inodes. In this case
- * the buffer will not be marked as an inode buffer and so will be
- * sent to xlog_recover_do_reg_buffer() below during recovery.
+ * The only time when buffers full of inodes are fully recovered is when the
+ * buffer is full of newly allocated inodes. In this case the buffer will
+ * not be marked as an inode buffer and so will be sent to
+ * xlog_recover_do_reg_buffer() below during recovery.
*/
STATIC int
xlog_recover_do_inode_buffer(
- xfs_mount_t *mp,
+ struct xfs_mount *mp,
xlog_recover_item_t *item,
- xfs_buf_t *bp,
+ struct xfs_buf *bp,
xfs_buf_log_format_t *buf_f)
{
int i;
- int item_index;
- int bit;
- int nbits;
- int reg_buf_offset;
- int reg_buf_bytes;
+ int item_index = 0;
+ int bit = 0;
+ int nbits = 0;
+ int reg_buf_offset = 0;
+ int reg_buf_bytes = 0;
int next_unlinked_offset;
int inodes_per_buf;
xfs_agino_t *logged_nextp;
xfs_agino_t *buffer_nextp;
- unsigned int *data_map = NULL;
- unsigned int map_size = 0;
trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
- switch (buf_f->blf_type) {
- case XFS_LI_BUF:
- data_map = buf_f->blf_data_map;
- map_size = buf_f->blf_map_size;
- break;
- }
- /*
- * Set the variables corresponding to the current region to
- * 0 so that we'll initialize them on the first pass through
- * the loop.
- */
- reg_buf_offset = 0;
- reg_buf_bytes = 0;
- bit = 0;
- nbits = 0;
- item_index = 0;
inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1852,18 +1765,18 @@ xlog_recover_do_inode_buffer(
* the current di_next_unlinked field.
*/
bit += nbits;
- bit = xfs_next_bit(data_map, map_size, bit);
+ bit = xfs_next_bit(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
/*
* If there are no more logged regions in the
* buffer, then we're done.
*/
- if (bit == -1) {
+ if (bit == -1)
return 0;
- }
- nbits = xfs_contig_bits(data_map, map_size,
- bit);
+ nbits = xfs_contig_bits(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
ASSERT(nbits > 0);
reg_buf_offset = bit << XFS_BLF_SHIFT;
reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1875,9 +1788,8 @@ xlog_recover_do_inode_buffer(
* di_next_unlinked field, then move on to the next
* di_next_unlinked field.
*/
- if (next_unlinked_offset < reg_buf_offset) {
+ if (next_unlinked_offset < reg_buf_offset)
continue;
- }
ASSERT(item->ri_buf[item_index].i_addr != NULL);
ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1913,36 +1825,29 @@ xlog_recover_do_inode_buffer(
* given buffer. The bitmap in the buf log format structure indicates
* where to place the logged data.
*/
-/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
struct xfs_mount *mp,
xlog_recover_item_t *item,
- xfs_buf_t *bp,
+ struct xfs_buf *bp,
xfs_buf_log_format_t *buf_f)
{
int i;
int bit;
int nbits;
- unsigned int *data_map = NULL;
- unsigned int map_size = 0;
int error;
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
- switch (buf_f->blf_type) {
- case XFS_LI_BUF:
- data_map = buf_f->blf_data_map;
- map_size = buf_f->blf_map_size;
- break;
- }
bit = 0;
i = 1; /* 0 is the buf format structure */
while (1) {
- bit = xfs_next_bit(data_map, map_size, bit);
+ bit = xfs_next_bit(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
if (bit == -1)
break;
- nbits = xfs_contig_bits(data_map, map_size, bit);
+ nbits = xfs_contig_bits(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
ASSERT(nbits > 0);
ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
@@ -2176,77 +2081,46 @@ xlog_recover_do_dquot_buffer(
* for more details on the implementation of the table of cancel records.
*/
STATIC int
-xlog_recover_do_buffer_trans(
+xlog_recover_buffer_pass2(
xlog_t *log,
- xlog_recover_item_t *item,
- int pass)
+ xlog_recover_item_t *item)
{
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
- xfs_mount_t *mp;
+ xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
int error;
- int cancel;
- xfs_daddr_t blkno;
- int len;
- ushort flags;
uint buf_flags;
- if (pass == XLOG_RECOVER_PASS1) {
- /*
- * In this pass we're only looking for buf items
- * with the XFS_BLF_CANCEL bit set.
- */
- xlog_recover_do_buffer_pass1(log, buf_f);
+ /*
+ * In this pass we only want to recover all the buffers which have
+ * not been cancelled and are not cancellation buffers themselves.
+ */
+ if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
+ buf_f->blf_len, buf_f->blf_flags)) {
+ trace_xfs_log_recover_buf_cancel(log, buf_f);
return 0;
- } else {
- /*
- * In this pass we want to recover all the buffers
- * which have not been cancelled and are not
- * cancellation buffers themselves. The routine
- * we call here will tell us whether or not to
- * continue with the replay of this buffer.
- */
- cancel = xlog_recover_do_buffer_pass2(log, buf_f);
- if (cancel) {
- trace_xfs_log_recover_buf_cancel(log, buf_f);
- return 0;
- }
}
+
trace_xfs_log_recover_buf_recover(log, buf_f);
- switch (buf_f->blf_type) {
- case XFS_LI_BUF:
- blkno = buf_f->blf_blkno;
- len = buf_f->blf_len;
- flags = buf_f->blf_flags;
- break;
- default:
- xfs_fs_cmn_err(CE_ALERT, log->l_mp,
- "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
- buf_f->blf_type, log->l_mp->m_logname ?
- log->l_mp->m_logname : "internal");
- XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
- XFS_ERRLEVEL_LOW, log->l_mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- mp = log->l_mp;
buf_flags = XBF_LOCK;
- if (!(flags & XFS_BLF_INODE_BUF))
+ if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
buf_flags |= XBF_MAPPED;
- bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
+ bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+ buf_flags);
if (XFS_BUF_ISERROR(bp)) {
- xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
- bp, blkno);
+ xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
+ bp, buf_f->blf_blkno);
error = XFS_BUF_GETERROR(bp);
xfs_buf_relse(bp);
return error;
}
error = 0;
- if (flags & XFS_BLF_INODE_BUF) {
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
- } else if (flags &
+ } else if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
@@ -2286,16 +2160,14 @@ xlog_recover_do_buffer_trans(
}
STATIC int
-xlog_recover_do_inode_trans(
+xlog_recover_inode_pass2(
xlog_t *log,
- xlog_recover_item_t *item,
- int pass)
+ xlog_recover_item_t *item)
{
xfs_inode_log_format_t *in_f;
- xfs_mount_t *mp;
+ xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
xfs_dinode_t *dip;
- xfs_ino_t ino;
int len;
xfs_caddr_t src;
xfs_caddr_t dest;
@@ -2305,10 +2177,6 @@ xlog_recover_do_inode_trans(
xfs_icdinode_t *dicp;
int need_free = 0;
- if (pass == XLOG_RECOVER_PASS1) {
- return 0;
- }
-
if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
in_f = item->ri_buf[0].i_addr;
} else {
@@ -2318,8 +2186,6 @@ xlog_recover_do_inode_trans(
if (error)
goto error;
}
- ino = in_f->ilf_ino;
- mp = log->l_mp;
/*
* Inode buffers can be freed, look out for it,
@@ -2354,8 +2220,8 @@ xlog_recover_do_inode_trans(
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
- dip, bp, ino);
- XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
+ dip, bp, in_f->ilf_ino);
+ XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
goto error;
@@ -2365,8 +2231,8 @@ xlog_recover_do_inode_trans(
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
- item, ino);
- XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
+ item, in_f->ilf_ino);
+ XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
goto error;
@@ -2394,12 +2260,12 @@ xlog_recover_do_inode_trans(
if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
(dicp->di_format != XFS_DINODE_FMT_BTREE)) {
- XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
- item, dip, bp, ino);
+ item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
@@ -2407,40 +2273,40 @@ xlog_recover_do_inode_trans(
if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
(dicp->di_format != XFS_DINODE_FMT_BTREE) &&
(dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
- XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
- item, dip, bp, ino);
+ item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
}
if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
- XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
- item, dip, bp, ino,
+ item, dip, bp, in_f->ilf_ino,
dicp->di_nextents + dicp->di_anextents,
dicp->di_nblocks);
error = EFSCORRUPTED;
goto error;
}
if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
- XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
- item, dip, bp, ino, dicp->di_forkoff);
+ item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
error = EFSCORRUPTED;
goto error;
}
if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
- XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2532,7 +2398,7 @@ xlog_recover_do_inode_trans(
break;
default:
- xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
+ xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
ASSERT(0);
xfs_buf_relse(bp);
error = EIO;
@@ -2556,18 +2422,11 @@ error:
* of that type.
*/
STATIC int
-xlog_recover_do_quotaoff_trans(
+xlog_recover_quotaoff_pass1(
xlog_t *log,
- xlog_recover_item_t *item,
- int pass)
+ xlog_recover_item_t *item)
{
- xfs_qoff_logformat_t *qoff_f;
-
- if (pass == XLOG_RECOVER_PASS2) {
- return (0);
- }
-
- qoff_f = item->ri_buf[0].i_addr;
+ xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
ASSERT(qoff_f);
/*
@@ -2588,22 +2447,17 @@ xlog_recover_do_quotaoff_trans(
* Recover a dquot record
*/
STATIC int
-xlog_recover_do_dquot_trans(
+xlog_recover_dquot_pass2(
xlog_t *log,
- xlog_recover_item_t *item,
- int pass)
+ xlog_recover_item_t *item)
{
- xfs_mount_t *mp;
+ xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
struct xfs_disk_dquot *ddq, *recddq;
int error;
xfs_dq_logformat_t *dq_f;
uint type;
- if (pass == XLOG_RECOVER_PASS1) {
- return 0;
- }
- mp = log->l_mp;
/*
* Filesystems are required to send in quota flags at mount time.
@@ -2647,7 +2501,7 @@ xlog_recover_do_dquot_trans(
if ((error = xfs_qm_dqcheck(recddq,
dq_f->qlf_id,
0, XFS_QMOPT_DOWARN,
- "xlog_recover_do_dquot_trans (log copy)"))) {
+ "xlog_recover_dquot_pass2 (log copy)"))) {
return XFS_ERROR(EIO);
}
ASSERT(dq_f->qlf_len == 1);
@@ -2670,7 +2524,7 @@ xlog_recover_do_dquot_trans(
* minimal initialization then.
*/
if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
- "xlog_recover_do_dquot_trans")) {
+ "xlog_recover_dquot_pass2")) {
xfs_buf_relse(bp);
return XFS_ERROR(EIO);
}
@@ -2693,38 +2547,31 @@ xlog_recover_do_dquot_trans(
* LSN.
*/
STATIC int
-xlog_recover_do_efi_trans(
+xlog_recover_efi_pass2(
xlog_t *log,
xlog_recover_item_t *item,
- xfs_lsn_t lsn,
- int pass)
+ xfs_lsn_t lsn)
{
int error;
- xfs_mount_t *mp;
+ xfs_mount_t *mp = log->l_mp;
xfs_efi_log_item_t *efip;
xfs_efi_log_format_t *efi_formatp;
- if (pass == XLOG_RECOVER_PASS1) {
- return 0;
- }
-
efi_formatp = item->ri_buf[0].i_addr;
- mp = log->l_mp;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
&(efip->efi_format)))) {
xfs_efi_item_free(efip);
return error;
}
- efip->efi_next_extent = efi_formatp->efi_nextents;
- efip->efi_flags |= XFS_EFI_COMMITTED;
+ atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
spin_lock(&log->l_ailp->xa_lock);
/*
* xfs_trans_ail_update() drops the AIL lock.
*/
- xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
+ xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
return 0;
}
@@ -2737,11 +2584,10 @@ xlog_recover_do_efi_trans(
* efd format structure. If we find it, we remove the efi from the
* AIL and free it.
*/
-STATIC void
-xlog_recover_do_efd_trans(
+STATIC int
+xlog_recover_efd_pass2(
xlog_t *log,
- xlog_recover_item_t *item,
- int pass)
+ xlog_recover_item_t *item)
{
xfs_efd_log_format_t *efd_formatp;
xfs_efi_log_item_t *efip = NULL;
@@ -2750,10 +2596,6 @@ xlog_recover_do_efd_trans(
struct xfs_ail_cursor cur;
struct xfs_ail *ailp = log->l_ailp;
- if (pass == XLOG_RECOVER_PASS1) {
- return;
- }
-
efd_formatp = item->ri_buf[0].i_addr;
ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2785,62 +2627,6 @@ xlog_recover_do_efd_trans(
}
xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
-}
-
-/*
- * Perform the transaction
- *
- * If the transaction modifies a buffer or inode, do it now. Otherwise,
- * EFIs and EFDs get queued up by adding entries into the AIL for them.
- */
-STATIC int
-xlog_recover_do_trans(
- xlog_t *log,
- xlog_recover_t *trans,
- int pass)
-{
- int error = 0;
- xlog_recover_item_t *item;
-
- error = xlog_recover_reorder_trans(log, trans, pass);
- if (error)
- return error;
-
- list_for_each_entry(item, &trans->r_itemq, ri_list) {
- trace_xfs_log_recover_item_recover(log, trans, item, pass);
- switch (ITEM_TYPE(item)) {
- case XFS_LI_BUF:
- error = xlog_recover_do_buffer_trans(log, item, pass);
- break;
- case XFS_LI_INODE:
- error = xlog_recover_do_inode_trans(log, item, pass);
- break;
- case XFS_LI_EFI:
- error = xlog_recover_do_efi_trans(log, item,
- trans->r_lsn, pass);
- break;
- case XFS_LI_EFD:
- xlog_recover_do_efd_trans(log, item, pass);
- error = 0;
- break;
- case XFS_LI_DQUOT:
- error = xlog_recover_do_dquot_trans(log, item, pass);
- break;
- case XFS_LI_QUOTAOFF:
- error = xlog_recover_do_quotaoff_trans(log, item,
- pass);
- break;
- default:
- xlog_warn(
- "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
- ASSERT(0);
- error = XFS_ERROR(EIO);
- break;
- }
-
- if (error)
- return error;
- }
return 0;
}
@@ -2852,7 +2638,7 @@ xlog_recover_do_trans(
*/
STATIC void
xlog_recover_free_trans(
- xlog_recover_t *trans)
+ struct xlog_recover *trans)
{
xlog_recover_item_t *item, *n;
int i;
@@ -2871,17 +2657,95 @@ xlog_recover_free_trans(
}
STATIC int
+xlog_recover_commit_pass1(
+ struct log *log,
+ struct xlog_recover *trans,
+ xlog_recover_item_t *item)
+{
+ trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
+
+ switch (ITEM_TYPE(item)) {
+ case XFS_LI_BUF:
+ return xlog_recover_buffer_pass1(log, item);
+ case XFS_LI_QUOTAOFF:
+ return xlog_recover_quotaoff_pass1(log, item);
+ case XFS_LI_INODE:
+ case XFS_LI_EFI:
+ case XFS_LI_EFD:
+ case XFS_LI_DQUOT:
+ /* nothing to do in pass 1 */
+ return 0;
+ default:
+ xlog_warn(
+ "XFS: invalid item type (%d) xlog_recover_commit_pass1",
+ ITEM_TYPE(item));
+ ASSERT(0);
+ return XFS_ERROR(EIO);
+ }
+}
+
+STATIC int
+xlog_recover_commit_pass2(
+ struct log *log,
+ struct xlog_recover *trans,
+ xlog_recover_item_t *item)
+{
+ trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
+
+ switch (ITEM_TYPE(item)) {
+ case XFS_LI_BUF:
+ return xlog_recover_buffer_pass2(log, item);
+ case XFS_LI_INODE:
+ return xlog_recover_inode_pass2(log, item);
+ case XFS_LI_EFI:
+ return xlog_recover_efi_pass2(log, item, trans->r_lsn);
+ case XFS_LI_EFD:
+ return xlog_recover_efd_pass2(log, item);
+ case XFS_LI_DQUOT:
+ return xlog_recover_dquot_pass2(log, item);
+ case XFS_LI_QUOTAOFF:
+ /* nothing to do in pass2 */
+ return 0;
+ default:
+ xlog_warn(
+ "XFS: invalid item type (%d) xlog_recover_commit_pass2",
+ ITEM_TYPE(item));
+ ASSERT(0);
+ return XFS_ERROR(EIO);
+ }
+}
+
+/*
+ * Perform the transaction.
+ *
+ * If the transaction modifies a buffer or inode, do it now. Otherwise,
+ * EFIs and EFDs get queued up by adding entries into the AIL for them.
+ */
+STATIC int
xlog_recover_commit_trans(
- xlog_t *log,
- xlog_recover_t *trans,
+ struct log *log,
+ struct xlog_recover *trans,
int pass)
{
- int error;
+ int error = 0;
+ xlog_recover_item_t *item;
hlist_del(&trans->r_list);
- if ((error = xlog_recover_do_trans(log, trans, pass)))
+
+ error = xlog_recover_reorder_trans(log, trans, pass);
+ if (error)
return error;
- xlog_recover_free_trans(trans); /* no error */
+
+ list_for_each_entry(item, &trans->r_itemq, ri_list) {
+ if (pass == XLOG_RECOVER_PASS1)
+ error = xlog_recover_commit_pass1(log, trans, item);
+ else
+ error = xlog_recover_commit_pass2(log, trans, item);
+ if (error)
+ return error;
+ }
+
+ xlog_recover_free_trans(trans);
return 0;
}
@@ -3011,7 +2875,7 @@ xlog_recover_process_efi(
xfs_extent_t *extp;
xfs_fsblock_t startblock_fsb;
- ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
+ ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
/*
* First check the validity of the extents described by the
@@ -3050,7 +2914,7 @@ xlog_recover_process_efi(
extp->ext_len);
}
- efip->efi_flags |= XFS_EFI_RECOVERED;
+ set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
error = xfs_trans_commit(tp, 0);
return error;
@@ -3107,7 +2971,7 @@ xlog_recover_process_efis(
* Skip EFIs that we've already processed.
*/
efip = (xfs_efi_log_item_t *)lip;
- if (efip->efi_flags & XFS_EFI_RECOVERED) {
+ if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
lip = xfs_trans_ail_cursor_next(ailp, &cur);
continue;
}
@@ -3724,7 +3588,7 @@ xlog_do_log_recovery(
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
- int error;
+ int error, i;
ASSERT(head_blk != tail_blk);
@@ -3732,10 +3596,12 @@ xlog_do_log_recovery(
* First do a pass to find all of the cancelled buf log items.
* Store them in the buf_cancel_table for use in the second pass.
*/
- log->l_buf_cancel_table =
- (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
- sizeof(xfs_buf_cancel_t*),
+ log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
+ sizeof(struct list_head),
KM_SLEEP);
+ for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+ INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS1);
if (error != 0) {
@@ -3754,7 +3620,7 @@ xlog_do_log_recovery(
int i;
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
- ASSERT(log->l_buf_cancel_table[i] == NULL);
+ ASSERT(list_empty(&log->l_buf_cancel_table[i]));
}
#endif /* DEBUG */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 19e9dfa1c25..d447aef84bc 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -472,7 +472,7 @@ xfs_initialize_perag(
goto out_unwind;
pag->pag_agno = index;
pag->pag_mount = mp;
- rwlock_init(&pag->pag_ici_lock);
+ spin_lock_init(&pag->pag_ici_lock);
mutex_init(&pag->pag_ici_reclaim_lock);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
spin_lock_init(&pag->pag_buf_lock);
@@ -975,6 +975,24 @@ xfs_set_rw_sizes(xfs_mount_t *mp)
}
/*
+ * Precalculate the low space thresholds for dynamic speculative preallocation.
+ */
+void
+xfs_set_low_space_thresholds(
+ struct xfs_mount *mp)
+{
+ int i;
+
+ for (i = 0; i < XFS_LOWSP_MAX; i++) {
+ __uint64_t space = mp->m_sb.sb_dblocks;
+
+ do_div(space, 100);
+ mp->m_low_space[i] = space * (i + 1);
+ }
+}
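For example, on a filesystem with sb_dblocks = 1,000,000, do_div() leaves space = 10,000, so m_low_space[] ends up holding 10,000, 20,000, 30,000, 40,000 and 50,000 blocks for the 1% through 5% thresholds respectively.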
+
+
+/*
* Set whether we're using inode alignment.
*/
STATIC void
@@ -1196,6 +1214,9 @@ xfs_mountfs(
*/
xfs_set_rw_sizes(mp);
+ /* set the low space thresholds for dynamic preallocation */
+ xfs_set_low_space_thresholds(mp);
+
/*
* Set the inode cluster size.
* This may still be overridden by the file system
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5861b498074..a62e8971539 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -103,6 +103,16 @@ extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
xfs_mod_incore_sb(mp, field, delta, rsvd)
#endif
+/* dynamic preallocation free space thresholds, 5% down to 1% */
+enum {
+ XFS_LOWSP_1_PCNT = 0,
+ XFS_LOWSP_2_PCNT,
+ XFS_LOWSP_3_PCNT,
+ XFS_LOWSP_4_PCNT,
+ XFS_LOWSP_5_PCNT,
+ XFS_LOWSP_MAX,
+};
+
typedef struct xfs_mount {
struct super_block *m_super;
xfs_tid_t m_tid; /* next unused tid for fs */
@@ -202,6 +212,8 @@ typedef struct xfs_mount {
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
struct shrinker m_inode_shrink; /* inode reclaim shrinker */
+ int64_t m_low_space[XFS_LOWSP_MAX];
+ /* low free space thresholds */
} xfs_mount_t;
/*
@@ -379,6 +391,8 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
+extern void xfs_set_low_space_thresholds(struct xfs_mount *);
+
#endif /* __KERNEL__ */
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b7711..f80a067a465 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1350,7 +1350,7 @@ xfs_trans_fill_vecs(
* they could be immediately flushed and we'd have to race with the flusher
* trying to pull the item from the AIL as we add it.
*/
-void
+static void
xfs_trans_item_committed(
struct xfs_log_item *lip,
xfs_lsn_t commit_lsn,
@@ -1425,6 +1425,83 @@ xfs_trans_committed(
xfs_trans_free(tp);
}
+static inline void
+xfs_log_item_batch_insert(
+ struct xfs_ail *ailp,
+ struct xfs_log_item **log_items,
+ int nr_items,
+ xfs_lsn_t commit_lsn)
+{
+ int i;
+
+ spin_lock(&ailp->xa_lock);
+ /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+ xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+
+ for (i = 0; i < nr_items; i++)
+ IOP_UNPIN(log_items[i], 0);
+}
+
+/*
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ */
+void
+xfs_trans_committed_bulk(
+ struct xfs_ail *ailp,
+ struct xfs_log_vec *log_vector,
+ xfs_lsn_t commit_lsn,
+ int aborted)
+{
+#define LOG_ITEM_BATCH_SIZE 32
+ struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
+ struct xfs_log_vec *lv;
+ int i = 0;
+
+ /* unpin all the log items */
+ for (lv = log_vector; lv; lv = lv->lv_next) {
+ struct xfs_log_item *lip = lv->lv_item;
+ xfs_lsn_t item_lsn;
+
+ if (aborted)
+ lip->li_flags |= XFS_LI_ABORTED;
+ item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+ /* item_lsn of -1 means the item was freed */
+ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+ continue;
+
+ if (item_lsn != commit_lsn) {
+
+ /*
+ * Not a bulk update option due to unusual item_lsn.
+ * Push into AIL immediately, rechecking the lsn once
+ * we have the ail lock. Then unpin the item.
+ */
+ spin_lock(&ailp->xa_lock);
+ if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+ xfs_trans_ail_update(ailp, lip, item_lsn);
+ else
+ spin_unlock(&ailp->xa_lock);
+ IOP_UNPIN(lip, 0);
+ continue;
+ }
+
+ /* Item is a candidate for bulk AIL insert. */
+ log_items[i++] = lv->lv_item;
+ if (i >= LOG_ITEM_BATCH_SIZE) {
+ xfs_log_item_batch_insert(ailp, log_items,
+ LOG_ITEM_BATCH_SIZE, commit_lsn);
+ i = 0;
+ }
+ }
+
+ /* make sure we insert the remainder! */
+ if (i)
+ xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+}
+
/*
* Called from the trans_commit code when we notice that
* the filesystem is in the middle of a forced shutdown.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 246286b77a8..c2042b736b8 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -294,8 +294,8 @@ struct xfs_log_item_desc {
#define XFS_ALLOC_BTREE_REF 2
#define XFS_BMAP_BTREE_REF 2
#define XFS_DIR_BTREE_REF 2
+#define XFS_INO_REF 2
#define XFS_ATTR_BTREE_REF 1
-#define XFS_INO_REF 1
#define XFS_DQUOT_REF 1
#ifdef __KERNEL__
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc9069568ff..c5bbbc45db9 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
+STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
@@ -449,129 +449,152 @@ xfs_trans_unlocked_item(
xfs_log_move_tail(ailp->xa_mount, 1);
} /* xfs_trans_unlocked_item */
-
/*
- * Update the position of the item in the AIL with the new
- * lsn. If it is not yet in the AIL, add it. Otherwise, move
- * it to its new position by removing it and re-adding it.
+ * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
+ *
+ * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added. Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
*
- * Wakeup anyone with an lsn less than the item's lsn. If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * This function holds the AIL lock for the whole update, executing the
+ * operations on all the items in the array under a single lock acquisition.
+ * Once we have the AIL lock, we need to check each log item LSN to confirm it
+ * needs to be moved forward in the AIL.
*
- * This function must be called with the AIL lock held. The lock
- * is dropped before returning.
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held. The lock is dropped
+ * before returning.
*/
void
-xfs_trans_ail_update(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip,
- xfs_lsn_t lsn) __releases(ailp->xa_lock)
+xfs_trans_ail_update_bulk(
+ struct xfs_ail *ailp,
+ struct xfs_log_item **log_items,
+ int nr_items,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock)
{
- xfs_log_item_t *dlip = NULL;
- xfs_log_item_t *mlip; /* ptr to minimum lip */
+ xfs_log_item_t *mlip;
xfs_lsn_t tail_lsn;
+ int mlip_changed = 0;
+ int i;
+ LIST_HEAD(tmp);
mlip = xfs_ail_min(ailp);
- if (lip->li_flags & XFS_LI_IN_AIL) {
- dlip = xfs_ail_delete(ailp, lip);
- ASSERT(dlip == lip);
- xfs_trans_ail_cursor_clear(ailp, dlip);
- } else {
- lip->li_flags |= XFS_LI_IN_AIL;
+ for (i = 0; i < nr_items; i++) {
+ struct xfs_log_item *lip = log_items[i];
+ if (lip->li_flags & XFS_LI_IN_AIL) {
+ /* check if we really need to move the item */
+ if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+ continue;
+
+ xfs_ail_delete(ailp, lip);
+ if (mlip == lip)
+ mlip_changed = 1;
+ } else {
+ lip->li_flags |= XFS_LI_IN_AIL;
+ }
+ lip->li_lsn = lsn;
+ list_add(&lip->li_ail, &tmp);
}
- lip->li_lsn = lsn;
- xfs_ail_insert(ailp, lip);
+ xfs_ail_splice(ailp, &tmp, lsn);
- if (mlip == dlip) {
- mlip = xfs_ail_min(ailp);
- /*
- * It is not safe to access mlip after the AIL lock is
- * dropped, so we must get a copy of li_lsn before we do
- * so. This is especially important on 32-bit platforms
- * where accessing and updating 64-bit values like li_lsn
- * is not atomic.
- */
- tail_lsn = mlip->li_lsn;
- spin_unlock(&ailp->xa_lock);
- xfs_log_move_tail(ailp->xa_mount, tail_lsn);
- } else {
+ if (!mlip_changed) {
spin_unlock(&ailp->xa_lock);
+ return;
}
-
-} /* xfs_trans_update_ail */
+ /*
+ * It is not safe to access mlip after the AIL lock is dropped, so we
+ * must get a copy of li_lsn before we do so. This is especially
+ * important on 32-bit platforms where accessing and updating 64-bit
+ * values like li_lsn is not atomic.
+ */
+ mlip = xfs_ail_min(ailp);
+ tail_lsn = mlip->li_lsn;
+ spin_unlock(&ailp->xa_lock);
+ xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
/*
- * Delete the given item from the AIL. It must already be in
- * the AIL.
+ * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
*
- * Wakeup anyone with an lsn less than item's lsn. If the item
- * we delete in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
+ * removed from the AIL. The caller is already holding the AIL lock, and has
+ * done all the checks necessary to ensure the items passed in via @log_items
+ * are ready for deletion. This includes checking that the items are in the
+ * AIL.
*
- * Clear the IN_AIL flag from the item, reset its lsn to 0, and
- * bump the AIL's generation count to indicate that the tree
- * has changed.
+ * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
+ * flag from the item and reset the item's lsn to 0. If we remove the first
+ * item in the AIL, update the log tail to match the new minimum LSN in the
+ * AIL.
*
- * This function must be called with the AIL lock held. The lock
- * is dropped before returning.
+ * This function will not drop the AIL lock until all items are removed from
+ * the AIL to minimise the amount of lock traffic on the AIL. This does not
+ * greatly increase the AIL hold time, but does significantly reduce the amount
+ * of traffic on the lock, especially during IO completion.
+ *
+ * This function must be called with the AIL lock held. The lock is dropped
+ * before returning.
*/
void
-xfs_trans_ail_delete(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip) __releases(ailp->xa_lock)
+xfs_trans_ail_delete_bulk(
+ struct xfs_ail *ailp,
+ struct xfs_log_item **log_items,
+ int nr_items) __releases(ailp->xa_lock)
{
- xfs_log_item_t *dlip;
xfs_log_item_t *mlip;
xfs_lsn_t tail_lsn;
+ int mlip_changed = 0;
+ int i;
- if (lip->li_flags & XFS_LI_IN_AIL) {
- mlip = xfs_ail_min(ailp);
- dlip = xfs_ail_delete(ailp, lip);
- ASSERT(dlip == lip);
- xfs_trans_ail_cursor_clear(ailp, dlip);
-
+ mlip = xfs_ail_min(ailp);
- lip->li_flags &= ~XFS_LI_IN_AIL;
- lip->li_lsn = 0;
+ for (i = 0; i < nr_items; i++) {
+ struct xfs_log_item *lip = log_items[i];
+ if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+ struct xfs_mount *mp = ailp->xa_mount;
- if (mlip == dlip) {
- mlip = xfs_ail_min(ailp);
- /*
- * It is not safe to access mlip after the AIL lock
- * is dropped, so we must get a copy of li_lsn
- * before we do so. This is especially important
- * on 32-bit platforms where accessing and updating
- * 64-bit values like li_lsn is not atomic.
- */
- tail_lsn = mlip ? mlip->li_lsn : 0;
- spin_unlock(&ailp->xa_lock);
- xfs_log_move_tail(ailp->xa_mount, tail_lsn);
- } else {
spin_unlock(&ailp->xa_lock);
+ if (!XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+ "%s: attempting to delete a log item that is not in the AIL",
+ __func__);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ }
+ return;
}
+
+ xfs_ail_delete(ailp, lip);
+ lip->li_flags &= ~XFS_LI_IN_AIL;
+ lip->li_lsn = 0;
+ if (mlip == lip)
+ mlip_changed = 1;
}
- else {
- /*
- * If the file system is not being shutdown, we are in
- * serious trouble if we get to this stage.
- */
- struct xfs_mount *mp = ailp->xa_mount;
+ if (!mlip_changed) {
spin_unlock(&ailp->xa_lock);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
- "%s: attempting to delete a log item that is not in the AIL",
- __func__);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- }
+ return;
}
-}
-
+ /*
+ * It is not safe to access mlip after the AIL lock is dropped, so we
+ * must get a copy of li_lsn before we do so. This is especially
+ * important on 32-bit platforms where accessing and updating 64-bit
+ * values like li_lsn is not atomic. It is possible we've emptied the
+ * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
+ */
+ mlip = xfs_ail_min(ailp);
+ tail_lsn = mlip ? mlip->li_lsn : 0;
+ spin_unlock(&ailp->xa_lock);
+ xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
/*
* The active item list (AIL) is a doubly linked list of log
@@ -623,16 +646,13 @@ xfs_trans_ail_destroy(
}
/*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
+ * Splice the log item list into the AIL at the given LSN.
*/
STATIC void
-xfs_ail_insert(
+xfs_ail_splice(
struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-/* ARGSUSED */
+ struct list_head *list,
+ xfs_lsn_t lsn)
{
xfs_log_item_t *next_lip;
@@ -640,39 +660,33 @@ xfs_ail_insert(
* If the list is empty, just splice in the whole list.
*/
if (list_empty(&ailp->xa_ail)) {
- list_add(&lip->li_ail, &ailp->xa_ail);
+ list_splice(list, &ailp->xa_ail);
return;
}
list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
- if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+ if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
break;
}
ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
- (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
-
- list_add(&lip->li_ail, &next_lip->li_ail);
+ (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
- xfs_ail_check(ailp, lip);
+ list_splice_init(list, &next_lip->li_ail);
return;
}
/*
* Delete the given item from the AIL.
*/
-/*ARGSUSED*/
-STATIC xfs_log_item_t *
+STATIC void
xfs_ail_delete(
struct xfs_ail *ailp,
xfs_log_item_t *lip)
-/* ARGSUSED */
{
xfs_ail_check(ailp, lip);
-
list_del(&lip->li_ail);
-
- return lip;
+ xfs_trans_ail_cursor_clear(ailp, lip);
}
/*
@@ -682,7 +696,6 @@ xfs_ail_delete(
STATIC xfs_log_item_t *
xfs_ail_min(
struct xfs_ail *ailp)
-/* ARGSUSED */
{
if (list_empty(&ailp->xa_ail))
return NULL;
@@ -699,7 +712,6 @@ STATIC xfs_log_item_t *
xfs_ail_next(
struct xfs_ail *ailp,
xfs_log_item_t *lip)
-/* ARGSUSED */
{
if (lip->li_ail.next == &ailp->xa_ail)
return NULL;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f783d5e9fa7..f7590f5bade 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -69,12 +69,16 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
tp->t_flags |= XFS_TRANS_DIRTY;
efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
- next_extent = efip->efi_next_extent;
+ /*
+ * atomic_inc_return gives us the value after the increment;
+ * we want to use it as an array index so we need to subtract 1 from
+ * it.
+ */
+ next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
ASSERT(next_extent < efip->efi_format.efi_nextents);
extp = &(efip->efi_format.efi_extents[next_extent]);
extp->ext_start = start_block;
extp->ext_len = ext_len;
- efip->efi_next_extent++;
}
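The change above relies on a common lock-free idiom: atomic_inc_return() hands every concurrent caller a unique post-increment value, so subtracting one yields a private 0-based slot without any locking. A sketch of the idiom in isolation (names hypothetical):

    static atomic_t next_slot = ATOMIC_INIT(0);

    /* Each caller claims a distinct index, even under concurrency. */
    static int claim_slot(void)
    {
        /* atomic_inc_return() yields the value *after* the increment. */
        return atomic_inc_return(&next_slot) - 1;
    }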
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c90de..35162c238fa 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@ struct xfs_log_item;
struct xfs_log_item_desc;
struct xfs_mount;
struct xfs_trans;
+struct xfs_ail;
+struct xfs_log_vec;
void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
int flags);
-void xfs_trans_item_committed(struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn, int aborted);
void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
+void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+ xfs_lsn_t commit_lsn, int aborted);
/*
* AIL traversal cursor.
*
@@ -73,12 +75,29 @@ struct xfs_ail {
/*
* From xfs_trans_ail.c
*/
-void xfs_trans_ail_update(struct xfs_ail *ailp,
- struct xfs_log_item *lip, xfs_lsn_t lsn)
- __releases(ailp->xa_lock);
-void xfs_trans_ail_delete(struct xfs_ail *ailp,
- struct xfs_log_item *lip)
- __releases(ailp->xa_lock);
+void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+ struct xfs_log_item **log_items, int nr_items,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_update(
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn) __releases(ailp->xa_lock)
+{
+ xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+}
+
+void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+ struct xfs_log_item **log_items, int nr_items)
+ __releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_delete(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip) __releases(ailp->xa_lock)
+{
+ xfs_trans_ail_delete_bulk(ailp, &lip, 1);
+}
+
void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 8e4a63c4151..d8e6f8cd6f0 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -964,29 +964,48 @@ xfs_release(
xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
}
- if (ip->i_d.di_nlink != 0) {
- if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
- ip->i_delayed_blks > 0)) &&
- (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
- (!(ip->i_d.di_flags &
- (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+ if (ip->i_d.di_nlink == 0)
+ return 0;
- /*
- * If we can't get the iolock just skip truncating
- * the blocks past EOF because we could deadlock
- * with the mmap_sem otherwise. We'll get another
- * chance to drop them once the last reference to
- * the inode is dropped, so we'll never leak blocks
- * permanently.
- */
- error = xfs_free_eofblocks(mp, ip,
- XFS_FREE_EOF_TRYLOCK);
- if (error)
- return error;
- }
- }
+ if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+ ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
+ ip->i_delayed_blks > 0)) &&
+ (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
+ (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+ /*
+ * If we can't get the iolock just skip truncating the blocks
+ * past EOF because we could deadlock with the mmap_sem
+ * otherwise. We'll get another chance to drop them once the
+ * last reference to the inode is dropped, so we'll never leak
+ * blocks permanently.
+ *
+ * Further, if the inode is being opened, written and closed
+ * frequently and we have delayed allocation blocks outstanding
+ * (e.g. streaming writes from the NFS server), truncating the
+ * blocks past EOF will cause fragmentation to occur.
+ *
+ * In this case don't do the truncation, either, but we have to
+ * be careful how we detect this case. Blocks beyond EOF show
+ * up as i_delayed_blks even when the inode is clean, so we
+ * need to truncate them away first before checking for a dirty
+ * release. Hence on the first dirty close we will still remove
+ * the speculative allocation, but after that we will leave it
+ * in place.
+ */
+ if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+ return 0;
+
+ error = xfs_free_eofblocks(mp, ip,
+ XFS_FREE_EOF_TRYLOCK);
+ if (error)
+ return error;
+
+ /* delalloc blocks after truncation mean it really is dirty */
+ if (ip->i_delayed_blks)
+ xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
+ }
return 0;
}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3577ca11a0b..4644c9a7f72 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -211,6 +211,36 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
}
#endif
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+{
+ insl((unsigned long)addr, buf, len);
+}
+
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+{
+ insw((unsigned long)addr, buf, len);
+}
+
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+{
+ insb((unsigned long)addr, buf, len);
+}
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+{
+ outsl((unsigned long)addr, buf, len);
+}
+
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+{
+ outsw((unsigned long)addr, buf, len);
+}
+
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+{
+ outsb((unsigned long)addr, buf, len);
+}
+
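Each of these helpers transfers len repeated units to or from a single MMIO address, which is the usual shape of a device FIFO. A hypothetical use (function and register names assumed):

    /* Drain 'words' 16-bit entries from a device FIFO into buf. */
    static void fifo_drain(void __iomem *fifo_reg, u16 *buf, int words)
    {
        readsw(fifo_reg, buf, words);   /* one 16-bit read per element */
    }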
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 274eaaa15c3..a4694c61033 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,6 +683,21 @@ struct drm_master {
void *driver_priv; /**< Private structure for driver to use */
};
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
/**
* DRM driver structure. This structure represents the common code for
* a family of cards. There will be one drm_device for each card present
@@ -760,6 +775,68 @@ struct drm_driver {
*/
int (*device_is_agp) (struct drm_device *dev);
+ /**
+ * Called by vblank timestamping code.
+ *
+ * Return the current display scanout position from a crtc.
+ *
+ * \param dev DRM device.
+ * \param crtc Id of the crtc to query.
+ * \param *vpos Target location for current vertical scanout position.
+ * \param *hpos Target location for current horizontal scanout position.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant
+ * but unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+
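A sketch of how a caller might decode the documented convention, where a negative vpos inside vblank counts scanlines until active scanout (the surrounding variables are hypothetical):

    int vpos, hpos;
    int ret = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);

    if ((ret & DRM_SCANOUTPOS_VALID) && (ret & DRM_SCANOUTPOS_INVBL))
        pr_debug("in vblank, %d scanlines until active scanout\n", -vpos);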
+ /**
+ * Called by \c drm_get_last_vbltimestamp. Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after the end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * \param dev DRM device handle.
+ * \param crtc crtc for which timestamp should be returned.
+ * \param *max_error Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most *max_error nanoseconds.
+ * On return, contains the true upper bound on the timestamp error.
+ * \param *vblank_time Target location for returned vblank timestamp.
+ * \param flags 0 = Defaults, no special treatment needed.
+ * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * \returns
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
/* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
@@ -983,6 +1060,8 @@ struct drm_device {
wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
spinlock_t vbl_lock;
atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
u32 *last_vblank; /* protected by dev->vbl_lock, used */
@@ -1041,12 +1120,14 @@ struct drm_device {
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
- uint32_t invalidate_domains; /* domains pending invalidation */
- uint32_t flush_domains; /* domains pending flush */
/*@} */
-
+ int switch_power_state;
};
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@@ -1284,11 +1365,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1321,7 +1413,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1340,6 +1431,9 @@ extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern struct dentry *drm_debugfs_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa688e78..acd7fade160 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -351,8 +351,14 @@ struct drm_crtc {
bool enabled;
+ /* Requested mode from modesetting. */
struct drm_display_mode mode;
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
int x, y;
const struct drm_crtc_funcs *funcs;
@@ -360,6 +366,9 @@ struct drm_crtc {
uint32_t gamma_size;
uint16_t *gamma_store;
+ /* Constants needed for precise vblank and swap timestamping. */
+ s64 framedur_ns, linedur_ns, pixeldur_ns;
+
/* if you are using the helper */
void *helper_private;
};
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f22e7fe4b6d..aac27bd56e8 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -121,9 +121,6 @@ int drm_fb_helper_setcolreg(unsigned regno,
void drm_fb_helper_restore(void);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth);
-
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531193d..e3917777860 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@ struct drm_mm {
struct list_head unused_nodes;
int num_unused;
spinlock_t unused_lock;
+ unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
};
/*
@@ -145,6 +148,10 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d43989..fe29ae328bd 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -142,6 +142,42 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -419,6 +455,10 @@
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index a2776e2807a..0039f1f97ad 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -289,6 +289,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
typedef struct drm_i915_getparam {
int param;
@@ -635,6 +636,17 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
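A hedged sketch of how userspace might select the new addressing mode in the flags word above (buffer setup omitted; only the flag usage is shown):

    struct drm_i915_gem_execbuffer2 execbuf = { 0 };

    /* Request absolute constants addressing on the RENDER ring. */
    execbuf.flags = I915_EXEC_RENDER | I915_EXEC_CONSTANTS_ABSOLUTE;
    /* The I915_EXEC_CONSTANTS_MASK bits are ignored on BSD/BLT rings. */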
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946f61..9e343c0998b 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-struct intel_gtt {
- /* Number of stolen gtt entries at the beginning. */
- unsigned int gtt_stolen_entries;
+
+const struct intel_gtt {
+ /* Size of memory reserved for graphics by the BIOS */
+ unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
-};
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+} *intel_gtt_get(void);
-struct intel_gtt *intel_gtt_get(void);
+void intel_gtt_chipset_flush(void);
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+ struct scatterlist **sg_list, int *num_sg);
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ struct page **pages, unsigned int flags);
-#endif
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+/* New caching attributes for gen6/sandybridge */
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b1a1a..e2cfe80f6fc 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free {
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
@@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf {
};
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
struct drm_nouveau_gem_cpu_prep {
uint32_t handle;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 10f8b53bdd4..e95a86b8b68 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -906,6 +906,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_ACCEL_WORKING2 0x05
#define RADEON_INFO_TILING_CONFIG 0x06
#define RADEON_INFO_WANT_HYPERZ 0x07
+#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a53..50852aad260 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
* @is_iomem: is this io memory?
* @size: size in byte
* @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the number of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
unsigned long size;
unsigned long offset;
bool is_iomem;
- bool io_reserved;
+ bool io_reserved_vm;
+ uint64_t io_reserved_count;
};
@@ -154,7 +157,6 @@ struct ttm_tt;
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
* @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@ struct ttm_buffer_object {
struct kref kref;
struct kref list_kref;
wait_queue_head_t event_queue;
- spinlock_t lock;
/**
* Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@ struct ttm_buffer_object {
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
+ struct list_head io_reserve_lru;
uint32_t val_seq;
bool seq_valid;
@@ -248,10 +250,10 @@ struct ttm_buffer_object {
atomic_t reserved;
/**
- * Members protected by the bo::lock
+ * Members protected by struct ttm_bo_device::fence_lock
* In addition, setting sync_obj to anything else
* than NULL requires bo::reserved to be held. This allows for
- * checking NULL while reserved but not holding bo::lock.
+ * checking NULL while reserved but not holding the mentioned lock.
*/
void *sync_obj_arg;
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to look up and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
/**
* ttm_bo_lock_delayed_workqueue
*
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b..1da8af6ac88 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
struct ttm_mem_type_manager;
struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
struct ttm_mem_type_manager {
struct ttm_bo_device *bdev;
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
uint32_t default_caching;
const struct ttm_mem_type_manager_func *func;
void *priv;
+ struct mutex io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
/*
* Protected by the global->lru_lock.
@@ -510,9 +525,12 @@ struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
* @addr_space_mm: Range manager for the device address space.
* @lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
+ * @val_seq: Current validation sequence.
* @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
* If a GPU lockup has been detected, this is forced to 0.
* @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@ struct ttm_bo_device {
struct ttm_bo_driver *driver;
rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ spinlock_t fence_lock;
/*
* Protected by the vm lock.
*/
@@ -541,6 +560,7 @@ struct ttm_bo_device {
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
+ uint32_t val_seq;
/*
* Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset,
- unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-
extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
*/
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
+
/**
* ttm_bo_unreserve
*
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
* ttm_bo_wait_unreserved
*
* @bo: A pointer to a struct ttm_buffer_object.
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9e..26cc7f9ffa4 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
* @bo: refcounted buffer object pointer.
* @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
* adding a new sync object.
- * @reservied: Indicates whether @bo has been reserved for validation.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
*/
struct ttm_validate_buffer {
@@ -49,6 +52,9 @@ struct ttm_validate_buffer {
struct ttm_buffer_object *bo;
void *new_sync_obj_arg;
bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
};
/**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
* function ttm_eu_reserve_buffers
*
* @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
* has failed.
*/
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
/**
* function ttm_eu_fence_buffer_objects.
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 09ea4a1e950..eaf6cd75a1b 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -102,10 +102,8 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
extern int agp_bind_memory(struct agp_memory *, off_t);
extern int agp_unbind_memory(struct agp_memory *);
-extern int agp_rebind_memory(void);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
-extern void agp_flush_chipset(struct agp_bridge_data *);
#endif /* _AGP_BACKEND_H */
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
index 904dec7d03a..a69554ef847 100644
--- a/include/linux/bfin_mac.h
+++ b/include/linux/bfin_mac.h
@@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data {
const unsigned short *mac_peripherals;
int phy_mode;
unsigned int phy_mask;
+ unsigned short vlan1_mask, vlan2_mask;
};
#endif
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b3892074..1c70028f81f 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
extern int ddebug_remove_module(const char *mod_name);
#define dynamic_pr_debug(fmt, ...) do { \
- __label__ do_printk; \
- __label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
_DPRINTK_FLAGS_DEFAULT }; \
- JUMP_LABEL(&descriptor.enabled, do_printk); \
- goto out; \
-do_printk: \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-out: ; \
+ if (unlikely(descriptor.enabled)) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) do { \
- __label__ do_printk; \
- __label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
_DPRINTK_FLAGS_DEFAULT }; \
- JUMP_LABEL(&descriptor.enabled, do_printk); \
- goto out; \
-do_printk: \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
-out: ; \
+ if (unlikely(descriptor.enabled)) \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
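Call sites are unchanged by this rework; each invocation still compiles to a static descriptor plus the cheap enabled check. A usage sketch (format string and variables hypothetical):

    dynamic_pr_debug("queue %d stalled for %lu ms\n", qid, ms);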
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f16a01081e1..bec8b82889b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev);
-extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
+extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+ unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
+#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
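A sketch of the three entry points after this change (struct my_priv is hypothetical); the new _mqs variant is what allows asymmetric queue counts:

    struct net_device *dev;

    dev = alloc_etherdev(sizeof(struct my_priv));            /* 1 TX, 1 RX */
    dev = alloc_etherdev_mq(sizeof(struct my_priv), 4);      /* 4 TX, 4 RX */
    dev = alloc_etherdev_mqs(sizeof(struct my_priv), 4, 2);  /* 4 TX, 2 RX */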
/**
* is_zero_ether_addr - Determine if given Ethernet address is all zeros.
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6ce1bca0172..65990ef612f 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -724,21 +724,30 @@ struct ext3_dir_entry_2 {
~EXT3_DIR_ROUND)
#define EXT3_MAX_REC_LEN ((1<<16)-1)
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
+#if (PAGE_CACHE_SIZE >= 65536)
if (len == EXT3_MAX_REC_LEN)
return 1 << 16;
+#endif
return len;
}
static inline __le16 ext3_rec_len_to_disk(unsigned len)
{
+#if (PAGE_CACHE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT3_MAX_REC_LEN);
else if (len > (1 << 16))
BUG();
+#endif
return cpu_to_le16(len);
}
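A worked example of the encoding these helpers implement, assuming an arch where PAGE_CACHE_SIZE >= 65536: a 65536-byte record length does not fit in a __le16, so it round-trips through the reserved value EXT3_MAX_REC_LEN:

    __le16 on_disk = ext3_rec_len_to_disk(1 << 16);    /* stored as 65535 */
    unsigned in_mem = ext3_rec_len_from_disk(on_disk); /* decodes to 65536 */
    /* every other length passes through both helpers unchanged */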
@@ -856,6 +865,7 @@ extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
extern void ext3_init_block_alloc_info(struct inode *);
extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
+extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
/* dir.c */
extern int ext3_check_dir_entry(const char *, struct inode *,
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 5d3523d8dd0..bcff455d1d5 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -3,6 +3,8 @@
* Copyright (c) 2009 Orex Computed Radiography
* Baruch Siach <baruch@tkos.co.il>
*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
* Header file for the FEC platform data
*
* This program is free software; you can redistribute it and/or modify
@@ -16,6 +18,7 @@
struct fec_platform_data {
phy_interface_t phy;
+ unsigned char mac[ETH_ALEN];
};
#endif
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h
new file mode 100644
index 00000000000..4a333bb0bd0
--- /dev/null
+++ b/include/linux/gpio-i2cmux.h
@@ -0,0 +1,38 @@
+/*
+ * gpio-i2cmux interface to platform code
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_GPIO_I2CMUX_H
+#define _LINUX_GPIO_I2CMUX_H
+
+/* MUX has no specific idle mode */
+#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1)
+
+/**
+ * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of bitmasks of GPIO settings (low/high) for each
+ * position
+ * @n_values: Number of multiplexer positions (busses to instantiate)
+ * @gpios: Array of GPIO numbers used to control MUX
+ * @n_gpios: Number of GPIOs used to control MUX
+ * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ */
+struct gpio_i2cmux_platform_data {
+ int parent;
+ int base_nr;
+ const unsigned *values;
+ int n_values;
+ const unsigned *gpios;
+ int n_gpios;
+ unsigned idle;
+};
+
+#endif /* _LINUX_GPIO_I2CMUX_H */
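A hedged example of board code filling in this platform data for a 4-way mux driven by two GPIOs (all GPIO and bus numbers hypothetical):

    static const unsigned mux_values[] = { 0, 1, 2, 3 };  /* GPIO bitmasks */
    static const unsigned mux_gpios[] = { 23, 24 };       /* hypothetical */

    static struct gpio_i2cmux_platform_data mux_pdata = {
        .parent   = 1,                  /* mux hangs off i2c-1 */
        .base_nr  = 0,                  /* dynamic child bus numbers */
        .values   = mux_values,
        .n_values = ARRAY_SIZE(mux_values),
        .gpios    = mux_gpios,
        .n_gpios  = ARRAY_SIZE(mux_gpios),
        .idle     = GPIO_I2CMUX_NO_IDLE,
    };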
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 56cfe23ffb3..903576df88d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -57,9 +57,10 @@ struct i2c_board_info;
* transmit an arbitrary number of messages without interruption.
 * @count must be less than 64k since msg.len is u16.
*/
-extern int i2c_master_send(struct i2c_client *client, const char *buf,
+extern int i2c_master_send(const struct i2c_client *client, const char *buf,
+ int count);
+extern int i2c_master_recv(const struct i2c_client *client, char *buf,
int count);
-extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
/* Transfer num messages.
*/
@@ -78,23 +79,25 @@ extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_read_byte(struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
+ u8 command);
+extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
+ u8 command);
+extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
u8 command, u16 value);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
u8 command, u8 length, const u8 *values);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
#endif /* I2C */
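
With the const-qualified client pointers above, read-only helpers can take a const client without casting; a minimal sketch:

	static int demo_read_reg(const struct i2c_client *client, u8 reg)
	{
		/* returns the register value, or a negative errno */
		return i2c_smbus_read_byte_data(client, reg);
	}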
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c338c4..dd3f2013964 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@ struct __fdb_entry {
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
#endif
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
deleted file mode 100644
index 1d19ab2afa3..00000000000
--- a/include/linux/intel-gtt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Common Intel AGPGART and GTT definitions.
- */
-#ifndef _INTEL_GTT_H
-#define _INTEL_GTT_H
-
-#include <linux/agp_backend.h>
-
-/* This is for Intel only GTT controls.
- *
- * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
- */
-
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
-/* flag for GFDT type */
-#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-
-#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2ae86aa21fc..27e79c27ba0 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -94,7 +94,7 @@ extern void jbd2_free(void *ptr, size_t size);
*
* This is an opaque datatype.
**/
-typedef struct handle_s handle_t; /* Atomic operation type */
+typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */
/**
@@ -416,7 +416,7 @@ struct jbd2_revoke_table_s;
* in so it can be fixed later.
*/
-struct handle_s
+struct jbd2_journal_handle
{
/* Which compound transaction is this update a part of? */
transaction_t *h_transaction;
@@ -1158,6 +1158,22 @@ static inline void jbd2_free_handle(handle_t *handle)
kmem_cache_free(jbd2_handle_cache, handle);
}
+/*
+ * jbd2_inode management (optional, for those file systems that want to use
+ * dynamically allocated jbd2_inode structures)
+ */
+extern struct kmem_cache *jbd2_inode_cache;
+
+static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
+{
+ return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
+}
+
+static inline void jbd2_free_inode(struct jbd2_inode *jinode)
+{
+ kmem_cache_free(jbd2_inode_cache, jinode);
+}
+
/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
extern int jbd2_journal_init_revoke(journal_t *, int);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc07ab..d4a62ab2ee5 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@ struct kref {
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+int kref_sub(struct kref *kref, unsigned int count,
+ void (*release) (struct kref *kref));
#endif /* _KREF_H_ */
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 34b2b7f33c3..257d3779f2a 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -44,14 +44,4 @@
#define NLMDBG_XDR 0x0100
#define NLMDBG_ALL 0x7fff
-
-/*
- * Support for printing NLM cookies in dprintk()
- */
-#ifdef RPC_DEBUG
-struct nlm_cookie;
-/* Call this function with the BKL held (it uses a static buffer) */
-extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
-#endif
-
#endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2dee05e5119..ff9abff55aa 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -202,9 +202,9 @@ extern u32 nsm_local_state;
* Lockd client functions
*/
struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
-void nlm_release_call(struct nlm_rqst *);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+void nlmclnt_release_call(struct nlm_rqst *);
struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
void nlmclnt_finish_block(struct nlm_wait *block);
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -223,13 +223,14 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const u32 version,
const char *hostname,
int noresvport);
+void nlmclnt_release_host(struct nlm_host *);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
const size_t hostname_len);
+void nlmsvc_release_host(struct nlm_host *);
struct rpc_clnt * nlm_bind_host(struct nlm_host *);
void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *);
-void nlm_release_host(struct nlm_host *);
void nlm_shutdown_hosts(void);
void nlm_host_rebooted(const struct nlm_reboot *);
@@ -267,6 +268,7 @@ unsigned long nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
+void nlmsvc_release_call(struct nlm_rqst *);
/*
* File handling for the server personality
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 54cbbac1e71..5525d370701 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,17 @@ struct mb_cache_entry {
} e_index;
};
+struct mb_cache {
+ struct list_head c_cache_list;
+ const char *c_name;
+ atomic_t c_entry_count;
+ int c_max_entries;
+ int c_bucket_bits;
+ struct kmem_cache *c_entry_cache;
+ struct list_head *c_block_hash;
+ struct list_head *c_index_hash;
+};
+
/* Functions on caches */
struct mb_cache *mb_cache_create(const char *, int);
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 085f041197d..8e70310ee94 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -57,6 +57,10 @@
* is configured in 4-bit mode.
*/
#define TMIO_MMC_BLKSZ_2BYTES (1 << 1)
+/*
+ * Some controllers can support SDIO IRQ signalling.
+ */
+#define TMIO_MMC_SDIO_IRQ (1 << 2)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -66,6 +70,7 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
struct tmio_mmc_dma {
void *chan_priv_tx;
void *chan_priv_rx;
+ int alignment_shift;
};
/*
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 00000000000..16b0261763e
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MMC_DW_MMC_H_
+#define _LINUX_MMC_DW_MMC_H_
+
+#define MAX_MCI_SLOTS 2
+
+enum dw_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_SENDING_DATA,
+ STATE_DATA_BUSY,
+ STATE_SENDING_STOP,
+ STATE_DATA_ERROR,
+};
+
+enum {
+ EVENT_CMD_COMPLETE = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_DATA_COMPLETE,
+ EVENT_DATA_ERROR,
+ EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @pdev: Platform device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @data_shift: log2 of FIFO item size.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+ spinlock_t lock;
+ void __iomem *regs;
+
+ struct scatterlist *sg;
+ unsigned int pio_offset;
+
+ struct dw_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ /* DMA interface members */
+ int use_dma;
+
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ struct dw_mci_dma_ops *dma_ops;
+#ifdef CONFIG_MMC_DW_IDMAC
+ unsigned int ring_size;
+#else
+ struct dw_mci_dma_data *dma_data;
+#endif
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+ u32 dir_status;
+ struct tasklet_struct tasklet;
+ struct tasklet_struct card_tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum dw_mci_state state;
+ struct list_head queue;
+
+ u32 bus_hz;
+ u32 current_speed;
+ u32 num_slots;
+ struct platform_device *pdev;
+ struct dw_mci_board *pdata;
+ struct dw_mci_slot *slot[MAX_MCI_SLOTS];
+
+ /* FIFO push and pull */
+ int data_shift;
+ void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+ void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+ /* Workaround flags */
+ u32 quirks;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+ /* DMA Ops */
+ int (*init)(struct dw_mci *host);
+ void (*start)(struct dw_mci *host, unsigned int sg_len);
+ void (*complete)(struct dw_mci *host);
+ void (*stop)(struct dw_mci *host);
+ void (*cleanup)(struct dw_mci *host);
+ void (*exit)(struct dw_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* No special quirks or flags to cater for */
+#define DW_MCI_QUIRK_NONE 0
+/* DTO fix for command transmission with IDMAC configured */
+#define DW_MCI_QUIRK_IDMAC_DTO 1
+/* delay needed between retries on some 2.11a implementations */
+#define DW_MCI_QUIRK_RETRY_DELAY 2
+/* High Speed Capable - Supports HS cards (up to 50 MHz) */
+#define DW_MCI_QUIRK_HIGHSPEED 4
+
+
+struct dma_pdata;
+
+struct block_settings {
+ unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_blk_size; /* maximum size of one mmc block */
+ unsigned int max_blk_count; /* maximum number of blocks in one req */
+ unsigned int max_req_size; /* maximum number of bytes in one req */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct dw_mci_board {
+ u32 num_slots;
+
+ u32 quirks; /* Workaround / Quirk flags */
+ unsigned int bus_hz; /* Bus speed */
+
+ /* delay in ms before detecting cards after interrupt */
+ u32 detect_delay_ms;
+
+ int (*init)(u32 slot_id, irq_handler_t, void *);
+ int (*get_ro)(u32 slot_id);
+ int (*get_cd)(u32 slot_id);
+ int (*get_ocr)(u32 slot_id);
+ int (*get_bus_wd)(u32 slot_id);
+ /*
+ * Enable power to the selected slot and set the voltage to the
+ * desired level. Voltage levels are specified using the MMC_VDD_xxx
+ * defines in linux/mmc/host.h.
+ */
+ void (*setpower)(u32 slot_id, u32 volt);
+ void (*exit)(u32 slot_id);
+ void (*select_slot)(u32 slot_id);
+
+ struct dw_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+ struct block_settings *blk_settings;
+};
+
+#endif /* _LINUX_MMC_DW_MMC_H_ */
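
A board file would typically wire up a single-slot controller with platform data along these lines (a minimal sketch; clock rate and detect delay are illustrative):

	static struct dw_mci_board demo_dw_mci_pdata = {
		.num_slots	 = 1,
		.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
		.bus_hz		 = 50000000,	/* 50 MHz controller clock */
		.detect_delay_ms = 200,		/* debounce card detect */
	};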
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f6fad99a5..bcb793ec737 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,6 +131,9 @@ struct mmc_host {
unsigned int f_max;
unsigned int f_init;
u32 ocr_avail;
+ u32 ocr_avail_sdio; /* SDIO-specific OCR */
+ u32 ocr_avail_sd; /* SD-specific OCR */
+ u32 ocr_avail_mmc; /* MMC-specific OCR */
struct notifier_block pm_notify;
#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
@@ -169,9 +172,20 @@ struct mmc_host {
#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
/* DDR mode at 1.2V */
#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
mmc_pm_flag_t pm_caps; /* supported pm features */
+#ifdef CONFIG_MMC_CLKGATE
+ int clk_requests; /* internal reference counter */
+ unsigned int clk_delay; /* number of MCI clk hold cycles */
+ bool clk_gated; /* clock gated */
+ struct work_struct clk_gate_work; /* delayed clock gate */
+ unsigned int clk_old; /* old clock value cache */
+ spinlock_t clk_lock; /* lock for clk fields */
+ struct mutex clk_gate_mutex; /* mutex for clock gating */
+#endif
+
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */
@@ -307,5 +321,10 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
}
+static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+{
+ return host->pm_flags & MMC_PM_KEEP_POWER;
+}
+
#endif
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 956fbd87769..612301f85d1 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -40,7 +40,9 @@
#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */
#define MMC_STOP_TRANSMISSION 12 /* ac R1b */
#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */
+#define MMC_BUS_TEST_R 14 /* adtc R1 */
#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */
+#define MMC_BUS_TEST_W 19 /* adtc R1 */
#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */
#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 1fdc673f239..83bd9f76709 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -83,6 +83,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -139,6 +141,10 @@ struct sdhci_host {
unsigned int caps; /* Alternative capabilities */
+ unsigned int ocr_avail_sdio; /* OCR bit masks */
+ unsigned int ocr_avail_sd;
+ unsigned int ocr_avail_mmc;
+
unsigned long private[0] ____cacheline_aligned;
};
#endif /* __SDHCI_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0f6b1c96581..be4957cf651 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2191,11 +2191,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
extern void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
- unsigned int queue_count);
+ unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
- alloc_netdev_mq(sizeof_priv, name, setup, 1)
+ alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+ alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
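
A driver wanting asymmetric queue counts can call the new allocator directly, e.g. four TX queues and one RX queue (a sketch; demo_priv and demo_setup are hypothetical):

	struct net_device *dev;

	dev = alloc_netdev_mqs(sizeof(struct demo_priv), "demo%d",
			       demo_setup, 4, 1);
	if (!dev)
		return -ENOMEM;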
@@ -2303,7 +2307,7 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+int netif_skb_features(struct sk_buff *skb);
static inline int net_gso_ok(int features, int gso_type)
{
@@ -2317,16 +2321,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
{
- if (skb_is_gso(skb)) {
- int features = netif_get_vlan_features(skb, dev);
-
- return (!skb_gso_ok(skb, features) ||
- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
- }
-
- return 0;
+ return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec05144..6712e713b29 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
* necessary for reading the counters.
*/
struct xt_info_lock {
- spinlock_t lock;
+ seqlock_t lock;
unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
local_bh_disable();
lock = &__get_cpu_var(xt_info_locks);
if (likely(!lock->readers++))
- spin_lock(&lock->lock);
+ write_seqlock(&lock->lock);
}
static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
if (likely(!--lock->readers))
- spin_unlock(&lock->lock);
+ write_sequnlock(&lock->lock);
local_bh_enable();
}
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
*/
static inline void xt_info_wrlock(unsigned int cpu)
{
- spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+ write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
}
static inline void xt_info_wrunlock(unsigned int cpu)
{
- spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+ write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
}
/*
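
On the counter-reading side, the seqlock conversion allows a lock-free snapshot that retries if it races with a writer; the intended pattern, sketched for one cpu:

	seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
	unsigned int start;

	do {
		start = read_seqbegin(lock);
		/* copy this cpu's counters into a local snapshot */
	} while (read_seqretry(lock, start));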
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index ac33806ec7f..6ccfe3b641e 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -11,6 +11,9 @@
#define NFS3_MAXGROUPS 16
#define NFS3_FHSIZE 64
#define NFS3_COOKIESIZE 4
+#define NFS3_CREATEVERFSIZE 8
+#define NFS3_COOKIEVERFSIZE 8
+#define NFS3_WRITEVERFSIZE 8
#define NFS3_FIFO_DEV (-1)
#define NFS3MODE_FMT 0170000
#define NFS3MODE_DIR 0040000
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4925b22219d..9b46300b430 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -111,9 +111,13 @@
#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
+#define EXCHGID4_FLAG_BIND_PRINC_STATEID 0x00000100
+
#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
+#define EXCHGID4_FLAG_MASK_PNFS 0x00070000
+
#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
/*
@@ -121,8 +125,8 @@
* they're set in the argument or response, have separate
* invalid flag masks for arg (_A) and resp (_R).
*/
-#define EXCHGID4_FLAG_MASK_A 0x40070003
-#define EXCHGID4_FLAG_MASK_R 0x80070003
+#define EXCHGID4_FLAG_MASK_A 0x40070103
+#define EXCHGID4_FLAG_MASK_R 0x80070103
#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d96436d2..b197563913b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,6 @@ struct nfs_client {
u64 cl_clientid; /* constant */
unsigned long cl_state;
- struct rb_root cl_openowner_id;
- struct rb_root cl_lockowner_id;
-
- struct list_head cl_delegations;
- struct rb_root cl_state_owners;
spinlock_t cl_lock;
unsigned long cl_lease_time;
@@ -71,6 +66,7 @@ struct nfs_client {
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
+ u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
#endif /* CONFIG_NFS_V4 */
@@ -148,7 +144,14 @@ struct nfs_server {
that are supported on this
filesystem */
struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
+ struct rpc_wait_queue roc_rpcwaitq;
+
+ /* the following fields are protected by nfs_client->cl_lock */
+ struct rb_root state_owners;
+ struct rb_root openowner_id;
+ struct rb_root lockowner_id;
#endif
+ struct list_head delegations;
void (*destroy)(struct nfs_server *);
atomic_t active; /* Keep track of any activity to this server */
@@ -196,6 +199,7 @@ struct nfs4_slot_table {
* op for dynamic resizing */
int target_max_slots; /* Set by CB_RECALL_SLOT as
* the new max_slots */
+ struct completion complete;
};
static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
@@ -212,7 +216,6 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
- struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 80f07198a31..b0068579bec 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -208,6 +208,7 @@ struct nfs4_layoutget_args {
struct inode *inode;
struct nfs_open_context *ctx;
struct nfs4_sequence_args seq_args;
+ nfs4_stateid stateid;
};
struct nfs4_layoutget_res {
@@ -223,7 +224,6 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
struct pnfs_layout_segment **lsegpp;
- int status;
};
struct nfs4_getdeviceinfo_args {
@@ -317,6 +317,7 @@ struct nfs_closeres {
struct nfs_lowner {
__u64 clientid;
__u64 id;
+ dev_t s_dev;
};
struct nfs_lock_args {
@@ -484,6 +485,7 @@ struct nfs_entry {
struct nfs_fh * fh;
struct nfs_fattr * fattr;
unsigned char d_type;
+ struct nfs_server * server;
};
/*
@@ -1089,7 +1091,7 @@ struct nfs_rpc_ops {
int (*pathconf) (struct nfs_server *, struct nfs_fh *,
struct nfs_pathconf *);
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
- __be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus);
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb845c16ad7..ab47732d81e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
+#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
@@ -1650,6 +1651,11 @@
#define PCI_DEVICE_ID_O2_6836 0x6836
#define PCI_DEVICE_ID_O2_6812 0x6872
#define PCI_DEVICE_ID_O2_6933 0x6933
+#define PCI_DEVICE_ID_O2_8120 0x8120
+#define PCI_DEVICE_ID_O2_8220 0x8220
+#define PCI_DEVICE_ID_O2_8221 0x8221
+#define PCI_DEVICE_ID_O2_8320 0x8320
+#define PCI_DEVICE_ID_O2_8321 0x8321
#define PCI_VENDOR_ID_3DFX 0x121a
#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
@@ -2363,6 +2369,8 @@
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
+#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391
+#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
#define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d1a9193960f..223b14cd129 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -31,8 +31,9 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
#define quota_error(sb, fmt, args...) \
__quota_error((sb), __func__, fmt , ## args)
-extern void __quota_error(struct super_block *sb, const char *func,
- const char *fmt, ...);
+extern __attribute__((format (printf, 3, 4)))
+void __quota_error(struct super_block *sb, const char *func,
+ const char *fmt, ...);
/*
* declaration of quota_function calls in kernel.
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 14dbc83ded2..3c995b4d742 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -107,12 +107,17 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year
extern int rtc_valid_tm(struct rtc_time *tm);
extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+ktime_t rtc_tm_to_ktime(struct rtc_time tm);
+struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/mutex.h>
+#include <linux/timerqueue.h>
+#include <linux/workqueue.h>
extern struct class *rtc_class;
@@ -151,7 +156,19 @@ struct rtc_class_ops {
};
#define RTC_DEVICE_NAME_SIZE 20
-struct rtc_task;
+typedef struct rtc_task {
+ void (*func)(void *private_data);
+ void *private_data;
+} rtc_task_t;
+
+
+struct rtc_timer {
+ struct rtc_task task;
+ struct timerqueue_node node;
+ ktime_t period;
+ int enabled;
+};
+
/* flags */
#define RTC_DEV_BUSY 0
@@ -179,16 +196,13 @@ struct rtc_device
spinlock_t irq_task_lock;
int irq_freq;
int max_user_freq;
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- struct work_struct uie_task;
- struct timer_list uie_timer;
- /* Those fields are protected by rtc->irq_lock */
- unsigned int oldsecs;
- unsigned int uie_irq_active:1;
- unsigned int stop_uie_polling:1;
- unsigned int uie_task_active:1;
- unsigned int uie_timer_active:1;
-#endif
+
+ struct timerqueue_head timerqueue;
+ struct rtc_timer aie_timer;
+ struct rtc_timer uie_rtctimer;
+ struct hrtimer pie_timer; /* sub-second expiry, so needs hrtimer */
+ int pie_enabled;
+ struct work_struct irqwork;
};
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
@@ -224,15 +238,22 @@ extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
unsigned int enabled);
-typedef struct rtc_task {
- void (*func)(void *private_data);
- void *private_data;
-} rtc_task_t;
+void rtc_aie_update_irq(void *private);
+void rtc_uie_update_irq(void *private);
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+ ktime_t expires, ktime_t period);
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_do_work(struct work_struct *work);
+
static inline bool is_leap_year(unsigned int year)
{
return (!(year % 4) && (year % 100)) || !(year % 400);
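
Taken together, the timerqueue-backed API lets callers arm RTC timers roughly as follows (a sketch; demo_cb is hypothetical, rtc is the driver's struct rtc_device, and error handling is elided):

	static void demo_cb(void *data)
	{
		pr_info("rtc timer fired\n");
	}

	struct rtc_timer demo_timer;
	struct rtc_time tm;
	ktime_t now;

	rtc_timer_init(&demo_timer, demo_cb, NULL);
	rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	/* one-shot timer, five seconds of RTC time from now */
	rtc_timer_start(rtc, &demo_timer, ktime_add(now, ktime_set(5, 0)),
			ktime_set(0, 0));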
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b2024757edd..8521067ed4f 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -110,9 +110,9 @@ struct rpc_credops {
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
- int (*crwrap_req)(struct rpc_task *, kxdrproc_t,
+ int (*crwrap_req)(struct rpc_task *, kxdreproc_t,
void *, __be32 *, void *);
- int (*crunwrap_resp)(struct rpc_task *, kxdrproc_t,
+ int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
void *, __be32 *, void *);
};
@@ -139,8 +139,8 @@ struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *
void put_rpccred(struct rpc_cred *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
-int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj);
-int rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj);
+int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
int rpcauth_uptodatecred(struct rpc_task *);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 7c91260c44a..c50b458b8a3 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,10 +43,18 @@ int bc_send(struct rpc_rqst *req);
*/
static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
{
- if (rqstp->rq_server->bc_xprt)
+ if (rqstp->rq_server->sv_bc_xprt)
return 1;
return 0;
}
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+ if (svc_is_backchannel(rqstp))
+ return (struct nfs4_sessionid *)
+ rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
+ return NULL;
+}
+
#else /* CONFIG_NFS_V4_1 */
static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
unsigned int min_reqs)
@@ -59,6 +67,11 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
return 0;
}
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+ return NULL;
+}
+
static inline void xprt_free_bc_request(struct rpc_rqst *req)
{
}
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a5a55f284b7..ef9476a36ff 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -89,8 +89,8 @@ struct rpc_version {
*/
struct rpc_procinfo {
u32 p_proc; /* RPC procedure number */
- kxdrproc_t p_encode; /* XDR encode function */
- kxdrproc_t p_decode; /* XDR decode function */
+ kxdreproc_t p_encode; /* XDR encode function */
+ kxdrdproc_t p_decode; /* XDR decode function */
unsigned int p_arglen; /* argument hdr length (u32) */
unsigned int p_replen; /* reply hdr length (u32) */
unsigned int p_count; /* call count */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5a3085b9b39..c81d4d8be3a 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@ struct svc_serv {
spinlock_t sv_cb_lock; /* protects the svc_cb_list */
wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
* entries in the svc_cb_list */
- struct svc_xprt *bc_xprt;
+ struct svc_xprt *sv_bc_xprt; /* callback on fore channel */
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aea0d438e3c..357da5e0daa 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -78,6 +78,7 @@ struct svc_xprt {
size_t xpt_remotelen; /* length of address */
struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */
+ void *xpt_bc_sid; /* back channel session ID */
struct net *xpt_net;
};
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 498ab93a81e..fc84b7a19ca 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -33,8 +33,8 @@ struct xdr_netobj {
};
/*
- * This is the generic XDR function. rqstp is either a rpc_rqst (client
- * side) or svc_rqst pointer (server side).
+ * This is the legacy generic XDR function. rqstp is either a rpc_rqst
+ * (client side) or svc_rqst pointer (server side).
* Encode functions always assume there's enough room in the buffer.
*/
typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
@@ -201,14 +201,22 @@ struct xdr_stream {
__be32 *end; /* end of available buffer space */
struct kvec *iov; /* pointer to the current kvec */
+ struct kvec scratch; /* Scratch buffer */
+ struct page **page_ptr; /* pointer to the current page */
};
+/*
+ * These are the xdr_stream style generic XDR encode and decode functions.
+ */
+typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
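
An xdr_stream-style encoder under the new typedefs looks roughly like this (a sketch for a hypothetical procedure that encodes a single 32-bit argument; per the comment above, encoders assume the buffer has room):

	static void demo_xdr_enc_u32(void *rqstp, struct xdr_stream *xdr, void *obj)
	{
		__be32 *p;

		p = xdr_reserve_space(xdr, 4);	/* room for one XDR word */
		*p = cpu_to_be32(*(u32 *)obj);
	}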
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87e95c..c6814616653 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,7 +32,7 @@ struct tracepoint {
int state; /* State. */
void (*regfunc)(void);
void (*unregfunc)(void);
- struct tracepoint_func *funcs;
+ struct tracepoint_func __rcu *funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
* globally visible and gcc happily
@@ -326,7 +326,7 @@ do_trace: \
* memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
* __entry->next_pid = next->pid;
* __entry->next_prio = next->prio;
- * )
+ * ),
*
* *
* * Formatted output of a trace record via TP_printk().
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ae9ab13b963..4b9a7f596f9 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -33,6 +33,7 @@ struct vga_switcheroo_handler {
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+ void (*reprobe)(struct pci_dev *dev),
bool (*can_switch)(struct pci_dev *dev));
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
@@ -48,6 +49,7 @@ int vga_switcheroo_process_delayed_switch(void);
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+ void (*reprobe)(struct pci_dev *dev),
bool (*can_switch)(struct pci_dev *dev)) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f79a31..be7798dea6f 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
#include <linux/skbuff.h>
/* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN 12
+#define MAX_AH_AUTH_LEN 16
struct crypto_ahash;
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce6658..91f0568a04e 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -25,5 +25,6 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
const unsigned char *src_hw,
const unsigned char *target_hw);
extern void arp_xmit(struct sk_buff *skb);
+int arp_invalidate(struct net_device *dev, __be32 ip);
#endif /* _ARP_H */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797f954..5395e09187d 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -107,8 +107,8 @@ struct phonet_protocol {
int sock_type;
};
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
int phonet_sysctl_init(void);
void phonet_sysctl_exit(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57ebae76..e9eee99d8b1 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
return q->q.qlen;
}
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
}
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
return true;
}
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
return qdisc_skb_cb(skb)->pkt_len;
}
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+ const struct sk_buff *skb)
+{
+ bstats->bytes += qdisc_pkt_len(skb);
+ bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
{
- sch->bstats.bytes += len;
- sch->bstats.packets++;
+ bstats_update(&sch->bstats, skb);
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- __qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 21a02f7e4f4..d884d268c70 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -152,14 +152,18 @@ struct sock_common {
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
*/
+ /* private: */
int skc_dontcopy_begin[0];
+ /* public: */
union {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
int skc_tx_queue_mapping;
atomic_t skc_refcnt;
+ /* private: */
int skc_dontcopy_end[0];
+ /* public: */
};
/**
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb24d59..da39b22636f 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
#undef CREATE_TRACE_POINTS
#include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ * linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d500d8..f10293c41b1 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@ TRACE_EVENT(kfree_skb,
TP_fast_assign(
__entry->skbaddr = skb;
- if (skb) {
- __entry->protocol = ntohs(skb->protocol);
- }
+ __entry->protocol = ntohs(skb->protocol);
__entry->location = location;
),
diff --git a/kernel/Makefile b/kernel/Makefile
index 33e0a39cf35..5669f71dfdd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
+obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 89c74861a3d..f9a45ebcc7b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
exit_fs(tsk);
check_stack_usage();
exit_thread();
+
+ /*
+ * Flush inherited counters to the parent - before the parent
+ * gets woken up by child-exit notifications.
+ *
+ * Because of cgroup mode, this must be called before cgroup_exit().
+ */
+ perf_event_exit_task(tsk);
+
cgroup_exit(tsk, 1);
if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
* FIXME: do that only when needed, using sched_exit tracepoint
*/
flush_ptrace_hw_breakpoint(tsk);
- /*
- * Flush inherited counters to the parent - before the parent
- * gets woken up by child-exit notifications.
- */
- perf_event_exit_task(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8c..b782b7a79f0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
#include <asm/irq_regs.h>
+enum event_type_t {
+ EVENT_FLEXIBLE = 0x1,
+ EVENT_PINNED = 0x2,
+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
static atomic64_t perf_event_id;
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
return "pmu";
}
+static inline u64 perf_clock(void)
+{
+ return local_clock();
+}
+
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
put_ctx(ctx);
}
-static inline u64 perf_clock(void)
-{
- return local_clock();
-}
-
/*
* Update the record of the current time in a context.
*/
@@ -256,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
ctx->timestamp = now;
}
+static u64 perf_event_time(struct perf_event *event)
+{
+ struct perf_event_context *ctx = event->ctx;
+ return ctx ? ctx->time : 0;
+}
+
/*
* Update the total_time_enabled and total_time_running fields for a event.
*/
@@ -269,7 +287,7 @@ static void update_event_times(struct perf_event *event)
return;
if (ctx->is_active)
- run_end = ctx->time;
+ run_end = perf_event_time(event);
else
run_end = event->tstamp_stopped;
@@ -278,7 +296,7 @@ static void update_event_times(struct perf_event *event)
if (event->state == PERF_EVENT_STATE_INACTIVE)
run_end = event->tstamp_stopped;
else
- run_end = ctx->time;
+ run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
}
@@ -534,6 +552,7 @@ event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
+ u64 tstamp = perf_event_time(event);
u64 delta;
/*
* An event which could not be activated because of
@@ -545,7 +564,7 @@ event_sched_out(struct perf_event *event,
&& !event_filter_match(event)) {
delta = ctx->time - event->tstamp_stopped;
event->tstamp_running += delta;
- event->tstamp_stopped = ctx->time;
+ event->tstamp_stopped = tstamp;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@ event_sched_out(struct perf_event *event,
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
- event->tstamp_stopped = ctx->time;
+ event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
@@ -768,6 +787,8 @@ event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
+ u64 tstamp = perf_event_time(event);
+
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
@@ -784,9 +805,9 @@ event_sched_in(struct perf_event *event,
return -EAGAIN;
}
- event->tstamp_running += ctx->time - event->tstamp_stopped;
+ event->tstamp_running += tstamp - event->tstamp_stopped;
- event->shadow_ctx_time = ctx->time - ctx->timestamp;
+ event->shadow_ctx_time = tstamp - ctx->timestamp;
if (!is_software_event(event))
cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
+ u64 tstamp = perf_event_time(event);
+
list_add_event(event, ctx);
perf_group_attach(event);
- event->tstamp_enabled = ctx->time;
- event->tstamp_running = ctx->time;
- event->tstamp_stopped = ctx->time;
+ event->tstamp_enabled = tstamp;
+ event->tstamp_running = tstamp;
+ event->tstamp_stopped = tstamp;
}
/*
@@ -937,7 +960,7 @@ static void __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx);
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
goto unlock;
/*
@@ -1042,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *sub;
+ u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
- event->tstamp_enabled = ctx->time - event->total_time_enabled;
+ event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
- if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
- sub->tstamp_enabled =
- ctx->time - sub->total_time_enabled;
- }
+ if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+ sub->tstamp_enabled = tstamp - sub->total_time_enabled;
}
}
@@ -1082,7 +1104,7 @@ static void __perf_event_enable(void *info)
goto unlock;
__perf_event_mark_enabled(event, ctx);
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
goto unlock;
/*
@@ -1193,12 +1215,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
return 0;
}
-enum event_type_t {
- EVENT_FLEXIBLE = 0x1,
- EVENT_PINNED = 0x2,
- EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
@@ -1435,7 +1451,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
continue;
if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
* Listen to the 'cpu' scheduling filter constraint
* of events:
*/
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
continue;
if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
continue;
hwc = &event->hw;
@@ -3893,7 +3909,7 @@ static int perf_event_task_match(struct perf_event *event)
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
return 0;
if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4046,7 @@ static int perf_event_comm_match(struct perf_event *event)
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
return 0;
if (event->attr.comm)
@@ -4178,7 +4194,7 @@ static int perf_event_mmap_match(struct perf_event *event,
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
- if (event->cpu != -1 && event->cpu != smp_processor_id())
+ if (!event_filter_match(event))
return 0;
if ((!executable && event->attr.mmap_data) ||
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53f338190b2..761c510a06c 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
-obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8cf959bad4..dc53ecb8058 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1313,12 +1313,10 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
__this_cpu_inc(user_stack_count);
-
-
event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event)
- return;
+ goto out_drop_count;
entry = ring_buffer_event_data(event);
entry->tgid = current->tgid;
@@ -1333,8 +1331,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+ out_drop_count:
__this_cpu_dec(user_stack_count);
-
out:
preempt_enable();
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3094318bfea..b335acb43be 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -141,11 +141,10 @@ static void ddebug_change(const struct ddebug_query *query,
else if (!dp->flags)
dt->num_enabled++;
dp->flags = newflags;
- if (newflags) {
- jump_label_enable(&dp->enabled);
- } else {
- jump_label_disable(&dp->enabled);
- }
+ if (newflags)
+ dp->enabled = 1;
+ else
+ dp->enabled = 0;
if (verbose)
printk(KERN_INFO
"ddebug: changed %s:%d [%s]%s %s\n",
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a08a4..3efb882b11d 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
return 0;
}
+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware: even
+ * if this function returns 0, you cannot count on the kref still being
+ * in memory. Only use the return value to learn that the kref is now
+ * gone, never that it is still present.
+ */
+int kref_sub(struct kref *kref, unsigned int count,
+ void (*release)(struct kref *kref))
+{
+ WARN_ON(release == NULL);
+ WARN_ON(release == (void (*)(struct kref *))kfree);
+
+ if (atomic_sub_and_test((int) count, &kref->refcount)) {
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
EXPORT_SYMBOL(kref_init);
EXPORT_SYMBOL(kref_get);
EXPORT_SYMBOL(kref_put);
+EXPORT_SYMBOL(kref_sub);
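
A caller that has taken several references during a batched operation can then drop them in one call (a sketch; demo_obj and its release function are hypothetical):

	static void demo_release(struct kref *kref)
	{
		struct demo_obj *obj = container_of(kref, struct demo_obj, ref);

		kfree(obj);	/* the release callback does the actual freeing */
	}

	/* drop 'nr' references at once; returns 1 if the object is gone */
	if (kref_sub(&obj->ref, nr, demo_release))
		pr_debug("demo_obj released\n");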
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f10..1e308f21092 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
char **sptr = va_arg(ap, char **);
- int16_t len;
- int size;
+ uint16_t len;
errcode = p9pdu_readf(pdu, proto_version,
"w", &len);
if (errcode)
break;
- size = max_t(int16_t, len, 0);
-
- *sptr = kmalloc(size + 1, GFP_KERNEL);
+ *sptr = kmalloc(len + 1, GFP_KERNEL);
if (*sptr == NULL) {
errcode = -EFAULT;
break;
}
- if (pdu_read(pdu, *sptr, size)) {
+ if (pdu_read(pdu, *sptr, len)) {
errcode = -EFAULT;
kfree(*sptr);
*sptr = NULL;
} else
- (*sptr)[size] = 0;
+ (*sptr)[len] = 0;
}
break;
case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'D':{
- int32_t *count = va_arg(ap, int32_t *);
+ uint32_t *count = va_arg(ap, uint32_t *);
void **data = va_arg(ap, void **);
errcode =
p9pdu_readf(pdu, proto_version, "d", count);
if (!errcode) {
*count =
- min_t(int32_t, *count,
+ min_t(uint32_t, *count,
pdu->size - pdu->offset);
*data = &pdu->sdata[pdu->offset];
}
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
const char *sptr = va_arg(ap, const char *);
- int16_t len = 0;
+ uint16_t len = 0;
if (sptr)
- len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+ len = min_t(uint16_t, strlen(sptr),
+ USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
stbuf->n_gid, stbuf->n_muid);
} break;
case 'D':{
- int32_t count = va_arg(ap, int32_t);
+ uint32_t count = va_arg(ap, uint32_t);
const void *data = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
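
The switch to unsigned lengths matters because any wire length with the top bit set went negative as int16_t and was clamped to zero, silently truncating the payload. A standalone userspace demonstration (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire = 0x8001;			/* length as transmitted */
	int16_t as_signed = (int16_t)wire;	/* old interpretation */

	/* Old code: max_t(int16_t, len, 0) clamps -32767 to 0. */
	printf("signed: %d -> allocates %d bytes\n",
	       as_signed, as_signed > 0 ? as_signed : 0);
	/* New code: the full 32769-byte length is honored. */
	printf("unsigned: %u -> allocates %u bytes\n", wire, wire);
	return 0;
}
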
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf50379..8184c031d02 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
return -ENOPROTOOPT;
lock_sock(&(cf_sk->sk));
- cf_sk->conn_req.param.size = ol;
if (ol > sizeof(cf_sk->conn_req.param.data) ||
copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
release_sock(&cf_sk->sk);
return -EINVAL;
}
+ cf_sk->conn_req.param.size = ol;
release_sock(&cf_sk->sk);
return 0;
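
The fix defers the size assignment until the bounds check and copy_from_user() have succeeded, so a rejected setsockopt() leaves no stale length behind. A hedged kernel-style sketch of the pattern; struct my_obj and my_set_param() are illustrative only:

struct my_obj {
	char data[64];
	size_t size;
};

static int my_set_param(struct my_obj *o, const void __user *buf, size_t len)
{
	if (len > sizeof(o->data) || copy_from_user(o->data, buf, len))
		return -EINVAL;	/* o->size still describes valid data */

	o->size = len;		/* commit only after the copy succeeded */
	return 0;
}
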
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c9894..fa9dab372b6 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
int pktlen;
int err = 0;
+ const u8 *ip_version;
+ u8 buf;
priv = container_of(layr, struct chnl_net, chnl);
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
* send the packet to the net stack.
*/
skb->dev = priv->netdev;
- skb->protocol = htons(ETH_P_IP);
+
+ /* check the version of IP */
+ ip_version = skb_header_pointer(skb, 0, 1, &buf);
+ if (!ip_version)
+ return -EINVAL;
+ switch (*ip_version >> 4) {
+ case 4:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 6:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
/* If we change the header in loop mode, the checksum is corrupted. */
if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
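
chnl_net now derives skb->protocol from the IP version nibble instead of assuming IPv4. The version lives in the high nibble of the first header byte, as this small userspace check illustrates:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t v4_first = 0x45;	/* version 4, IHL 5 */
	uint8_t v6_first = 0x60;	/* version 6, traffic class 0 */

	printf("IPv4 version nibble: %u\n", v4_first >> 4);	/* 4 */
	printf("IPv6 version nibble: %u\n", v6_first >> 4);	/* 6 */
	return 0;
}
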
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e3..a3ef808b5e3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
- return ((features & NETIF_F_NO_CSUM) ||
- ((features & NETIF_F_V4_CSUM) &&
- protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_V6_CSUM) &&
- protocol == htons(ETH_P_IPV6)) ||
- ((features & NETIF_F_FCOE_CRC) &&
- protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
- __be16 protocol = skb->protocol;
- int features = dev->features;
-
- if (vlan_tx_tag_present(skb)) {
- features &= dev->vlan_features;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- features &= dev->vlan_features;
- }
-
- return can_checksum_protocol(features, protocol);
-}
-
/**
* skb_dev_set -- assign a new device to a buffer
* @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
/**
* dev_gso_segment - Perform emulated hardware segmentation on skb.
* @skb: buffer to segment
+ * @features: device features as applicable to this skb
*
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
{
- struct net_device *dev = skb->dev;
struct sk_buff *segs;
- int features = dev->features & ~(illegal_highdma(dev, skb) ?
- NETIF_F_SG : 0);
segs = skb_gso_segment(skb, features);
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
}
}
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_V6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)) ||
+ ((features & NETIF_F_FCOE_CRC) &&
+ protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+ if (!can_checksum_protocol(features, protocol)) {
+ features &= ~NETIF_F_ALL_CSUM;
+ features &= ~NETIF_F_SG;
+ } else if (illegal_highdma(skb->dev, skb)) {
+ features &= ~NETIF_F_SG;
+ }
+
+ return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
+ int features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
- } else if (!skb->vlan_tci)
- return dev->features;
+ } else if (!vlan_tx_tag_present(skb)) {
+ return harmonize_features(skb, protocol, features);
+ }
- if (protocol != htons(ETH_P_8021Q))
- return dev->features & dev->vlan_features;
- else
- return 0;
+ features &= skb->dev->vlan_features;
+
+ if (protocol != htons(ETH_P_8021Q)) {
+ return harmonize_features(skb, protocol, features);
+ } else {
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM;
+ return harmonize_features(skb, protocol, features);
+ }
}
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
/*
* Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
* support DMA from it.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
- struct net_device *dev)
+ int features)
{
- if (skb_is_nonlinear(skb)) {
- int features = dev->features;
-
- if (vlan_tx_tag_present(skb))
- features &= dev->vlan_features;
-
- return (skb_has_frag_list(skb) &&
- !(features & NETIF_F_FRAGLIST)) ||
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frag_list(skb) &&
+ !(features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags &&
- (!(features & NETIF_F_SG) ||
- illegal_highdma(dev, skb)));
- }
-
- return 0;
+ !(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
+ int features;
+
/*
* If device doesn't need skb->dst, release it right now while
* it's hot in this CPU cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb_orphan_try(skb);
+ features = netif_skb_features(skb);
+
if (vlan_tx_tag_present(skb) &&
- !(dev->features & NETIF_F_HW_VLAN_TX)) {
+ !(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb))
goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb->vlan_tci = 0;
}
- if (netif_needs_gso(dev, skb)) {
- if (unlikely(dev_gso_segment(skb)))
+ if (netif_needs_gso(skb, features)) {
+ if (unlikely(dev_gso_segment(skb, features)))
goto out_kfree_skb;
if (skb->next)
goto gso;
} else {
- if (skb_needs_linearize(skb, dev) &&
+ if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto out_kfree_skb;
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
- if (!dev_can_checksum(dev, skb) &&
+ if (!(features & NETIF_F_ALL_CSUM) &&
skb_checksum_help(skb))
goto out_kfree_skb;
}
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
*/
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
- __qdisc_update_bstats(q, skb->len);
+
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_bstats_update(q, skb);
+
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
}
/**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
* @name: device name format string
* @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
*
* Allocates a struct net_device with private data area for driver use
* and performs basic initialization. Also allocates subqueue structs
- * for each queue on the device at the end of the netdevice.
+ * for each queue on the device.
*/
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
- void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *),
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
BUG_ON(strlen(name) >= sizeof(dev->name));
- if (queue_count < 1) {
+ if (txqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero queues.\n");
return NULL;
}
+#ifdef CONFIG_RPS
+ if (rxqs < 1) {
+ pr_err("alloc_netdev: Unable to allocate device "
+ "with zero RX queues.\n");
+ return NULL;
+ }
+#endif
+
alloc_size = sizeof(struct net_device);
if (sizeof_priv) {
/* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
- dev->num_tx_queues = queue_count;
- dev->real_num_tx_queues = queue_count;
+ dev->num_tx_queues = txqs;
+ dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
goto free_pcpu;
#ifdef CONFIG_RPS
- dev->num_rx_queues = queue_count;
- dev->real_num_rx_queues = queue_count;
+ dev->num_rx_queues = rxqs;
+ dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_pcpu;
#endif
@@ -5711,7 +5720,7 @@ free_p:
kfree(p);
return NULL;
}
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
/**
* free_netdev - free network device
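
The per-skb netif_skb_features() path above hinges on the checksum-capability test (note the corrected features-first argument order in harmonize_features()). A userspace sketch of the same decision, with illustrative flag values rather than the real NETIF_F_* bits:

#include <stdbool.h>
#include <stdio.h>

#define F_GEN_CSUM	0x01	/* illustrative values only */
#define F_V4_CSUM	0x02
#define F_V6_CSUM	0x04
#define P_IP		0x0800
#define P_IPV6		0x86DD

static bool can_checksum(unsigned long features, unsigned int proto)
{
	return (features & F_GEN_CSUM) ||
	       ((features & F_V4_CSUM) && proto == P_IP) ||
	       ((features & F_V6_CSUM) && proto == P_IPV6);
}

int main(void)
{
	printf("%d\n", can_checksum(F_V4_CSUM, P_IP));		/* 1 */
	printf("%d\n", can_checksum(F_V4_CSUM, P_IPV6));	/* 0 */
	return 0;
}
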
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4efdd4..afc58374ca9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
/**
* sk_run_filter - run a filter on a socket
* @skb: buffer to run the filter on
- * @filter: filter to apply
+ * @fentry: filter to apply
*
* Decode and apply filter instructions to the skb->data.
* Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb..a5f7535aab5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
return -EPERM;
- if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+ if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
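
The stricter test works because NLM_F_DUMP is the two-bit mask NLM_F_ROOT | NLM_F_MATCH: the old flags & NLM_F_DUMP fired when either bit was set, while the new comparison requires both. Demonstrated in userspace:

#include <stdio.h>
#include <linux/netlink.h>	/* NLM_F_ROOT, NLM_F_MATCH, NLM_F_DUMP */

int main(void)
{
	unsigned int flags = NLM_F_ROOT;	/* only one of the two bits */

	printf("loose test:  %d\n", !!(flags & NLM_F_DUMP));		 /* 1 */
	printf("strict test: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0 */
	return 0;
}
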
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d89..5fdb0722901 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
struct dccp_sock *dp = dccp_sk(sk);
- dp->dccps_gsr = seq;
+ if (after48(seq, dp->dccps_gsr))
+ dp->dccps_gsr = seq;
/* Sequence validity window depends on remote Sequence Window (7.5.1) */
dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea00..8cde009e8b8 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
*/
if (time_before(now, (dp->dccps_rate_last +
sysctl_dccp_sync_ratelimit)))
- return 0;
+ return -1;
DCCP_WARN("Step 6 failed for %s packet, "
"(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e5..42348824ee3 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
/* Boundary values */
static int zero = 0,
u8_max = 0xFF;
-static unsigned long seqw_min = 32;
+static unsigned long seqw_min = DCCPF_SEQ_WMIN,
+ seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
static struct ctl_table dccp_default_table[] = {
{
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &seqw_min, /* RFC 4340, 7.5.2 */
+ .extra2 = &seqw_max,
},
{
.procname = "rx_ccid",
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d81..f9d7ac924f1 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
EXPORT_SYMBOL(ether_setup);
/**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
*
* Fill in the fields of the device structure with Ethernet-generic
* values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
* this private data area.
*/
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+ unsigned int rxqs)
{
- return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+ return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
}
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
static size_t _format_mac_addr(char *buf, int buflen,
const unsigned char *addr, int len)
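
A hedged usage sketch of the new asymmetric allocator; the driver context and struct my_priv are hypothetical:

struct my_priv {
	/* driver state */
	int dummy;
};

static int my_probe(void)
{
	struct net_device *netdev;

	/* A NIC with 8 TX queues but only 4 RX queues can now size
	 * each side independently instead of using one queue_count. */
	netdev = alloc_etherdev_mqs(sizeof(struct my_priv), 8, 4);
	if (!netdev)
		return -ENOMEM;
	return 0;
}
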
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce..86961bec70a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb->ip_summed = CHECKSUM_NONE;
- ah = (struct ip_auth_hdr *)skb->data;
- iph = ip_hdr(skb);
- ihl = ip_hdrlen(skb);
if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
goto out;
nfrags = err;
+ ah = (struct ip_auth_hdr *)skb->data;
+ iph = ip_hdr(skb);
+ ihl = ip_hdrlen(skb);
+
work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
if (!work_iph)
goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961db..04c8b69fd42 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
return err;
}
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+ struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+ int err = -ENXIO;
+
+ if (neigh) {
+ if (neigh->nud_state & ~NUD_NOARP)
+ err = neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE|
+ NEIGH_UPDATE_F_ADMIN);
+ neigh_release(neigh);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
static int arp_req_delete_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
{
int err;
__be32 ip;
- struct neighbour *neigh;
if (r->arp_flags & ATF_PUBL)
return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
if (!dev)
return -EINVAL;
}
- err = -ENXIO;
- neigh = neigh_lookup(&arp_tbl, &ip, dev);
- if (neigh) {
- if (neigh->nud_state & ~NUD_NOARP)
- err = neigh_update(neigh, NULL, NUD_FAILED,
- NEIGH_UPDATE_F_OVERRIDE|
- NEIGH_UPDATE_F_ADMIN);
- neigh_release(neigh);
- }
- return err;
+ return arp_invalidate(dev, ip);
}
/*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f1..97e5fb76526 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) {
+ ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+ !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
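
The (1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE) form tests membership in a set of states with a single AND, relying on each TCPF_x being 1 << TCP_x; the bind conflict now also covers sockets in TCP_CLOSE. The idiom in isolation, with illustrative state numbers:

#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };
#define STF(s)	(1 << (s))

int main(void)
{
	int conflict_mask = STF(ST_LISTEN) | STF(ST_CLOSE);
	int s;

	for (s = 1; s <= 11; s++)
		if (STF(s) & conflict_mask)
			printf("state %d conflicts\n", s);	/* 7, 10 */
	return 0;
}
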
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fc..2746c1fa641 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
nlmsg_len(nlh) < hdrlen)
return -EINVAL;
- if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
if (nlmsg_attrlen(nlh, hdrlen)) {
struct nlattr *attr;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d..e855fffaed9 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
* about).
*/
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
struct arpt_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
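
get_counters() now snapshots each per-CPU counter under a seqlock read loop instead of taking write locks and disabling bottom halves. A userspace sketch of the reader-retry pattern (memory-ordering details that the kernel seqlock handles are omitted):

#include <stdatomic.h>
#include <stdint.h>

struct counter {
	atomic_uint seq;	/* even = stable, odd = writer active */
	uint64_t bytes, pkts;
};

/* Writer side: bump seq to odd, update bytes/pkts, bump seq to even. */
static void read_counter(struct counter *c, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&c->seq)) & 1)
			;			/* writer in progress */
		*b = c->bytes;
		*p = c->pkts;
	} while (atomic_load(&c->seq) != start);	/* raced: retry */
}
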
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b..652efea013d 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU.
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i; /* macro does multi eval of i */
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct ipt_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26c..1aba54ae53c 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, ah_hlen))
goto out;
- ip6h = ipv6_hdr(skb);
-
- skb_push(skb, hdr_len);
if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
goto out;
nfrags = err;
+ ah = (struct ip_auth_hdr *)skb->data;
+ ip6h = ipv6_hdr(skb);
+
+ skb_push(skb, hdr_len);
+
work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
if (!work_iph)
goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815..d144e629d2b 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
+ ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ec..7d227c644f7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
struct ip6t_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct ip6t_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50c0d6..5cb8d3027b1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
- rcu_read_lock();
+ spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
- hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
- if (!atomic_inc_not_zero(&ct->ct_general.use))
- continue;
/* Dump entries of a given L3 protocol number.
* If it is not specified, ie. l3proto == 0,
* then dump everything. */
if (l3proto && nf_ct_l3num(ct) != l3proto)
- goto releasect;
+ continue;
if (cb->args[1]) {
if (ct != last)
- goto releasect;
+ continue;
cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
if (acct)
memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
}
-releasect:
- nf_ct_put(ct);
}
if (cb->args[1]) {
cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
}
}
out:
- rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
if (last)
nf_ct_put(last);
@@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if (nlh->nlmsg_flags & NLM_F_DUMP)
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
ctnetlink_done);
@@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
return netlink_dump_start(ctnl, skb, nlh,
ctnetlink_exp_dump_table,
ctnetlink_exp_done);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507420..c9423763107 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
for_each_possible_cpu(i) {
struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
- spin_lock_init(&lock->lock);
+
+ seqlock_init(&lock->lock);
lock->readers = 0;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e..f83cb370292 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
security_netlink_recv(skb, CAP_NET_ADMIN))
return -EPERM;
- if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
if (ops->dumpit == NULL)
return -EOPNOTSUPP;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5..1072b2c19d3 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
/* Transport protocol registration */
static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
{
struct phonet_protocol *pp;
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
static DEFINE_MUTEX(proto_tab_lock);
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
struct phonet_protocol *pp)
{
int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
}
EXPORT_SYMBOL(phonet_proto_register);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
{
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63..83ddfc07e45 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
spin_lock(&p->tcf_lock);
p->tcf_tm.lastuse = jiffies;
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
action = p->tcf_action;
update_flags = p->update_flags;
spin_unlock(&p->tcf_lock);
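
The repeated two-line byte/packet updates across the tc actions and qdiscs below collapse into bstats_update(). Its expected shape, inferred from the open-coded sequences it replaces (including the GSO-aware packet count in sch_htb.c) rather than quoted from the header:

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
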
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef963225..c2a7c20e81c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
spin_lock(&ipt->tcf_lock);
ipt->tcf_tm.lastuse = jiffies;
- ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
- ipt->tcf_bstats.packets++;
+ bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be9282..d765067e99d 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
spin_lock(&m->tcf_lock);
m->tcf_tm.lastuse = jiffies;
- m->tcf_bstats.bytes += qdisc_pkt_len(skb);
- m->tcf_bstats.packets++;
+ bstats_update(&m->tcf_bstats, skb);
dev = m->tcfm_dev;
if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e60..178a4bd7b7c 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
egress = p->flags & TCA_NAT_FLAG_EGRESS;
action = p->tcf_action;
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640d..445bef716f7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
bad:
p->tcf_qstats.overlimits++;
done:
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
spin_unlock(&p->tcf_lock);
return p->tcf_action;
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b47..e2f08b1e2e5 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
spin_lock(&police->tcf_lock);
- police->tcf_bstats.bytes += qdisc_pkt_len(skb);
- police->tcf_bstats.packets++;
+ bstats_update(&police->tcf_bstats, skb);
if (police->tcfp_ewma_rate &&
police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee77..7287cff7af3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
spin_lock(&d->tcf_lock);
d->tcf_tm.lastuse = jiffies;
- d->tcf_bstats.bytes += qdisc_pkt_len(skb);
- d->tcf_bstats.packets++;
+ bstats_update(&d->tcf_bstats, skb);
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb885..836f5fee9e5 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
spin_lock(&d->tcf_lock);
d->tcf_tm.lastuse = jiffies;
- d->tcf_bstats.bytes += qdisc_pkt_len(skb);
- d->tcf_bstats.packets++;
+ bstats_update(&d->tcf_bstats, skb);
if (d->flags & SKBEDIT_F_PRIORITY)
skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa..943d733409d 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
}
return ret;
}
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
- flow->bstats.bytes += qdisc_pkt_len(skb);
- flow->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
+ bstats_update(&flow->bstats, skb);
/*
* Okay, this may seem weird. We pretend we've dropped the packet if
* it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb763159086..c80d1c210c5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8c..de55e642eaf 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
- unsigned int len;
int err;
cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- len = qdisc_pkt_len(skb);
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = cl->quantum;
}
- cl->bstats.packets++;
- cl->bstats.bytes += len;
- sch->bstats.packets++;
- sch->bstats.bytes += len;
+ bstats_update(&cl->bstats, skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5..60f4bdd4408 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb3..2e45791d4f6 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
- cl->bstats.packets++;
- cl->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ bstats_update(&cl->bstats, skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52..984c1b0c683 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
return ret;
} else {
- cl->bstats.packets +=
- skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
- cl->bstats.bytes += qdisc_pkt_len(skb);
+ bstats_update(&cl->bstats, skb);
htb_activate(q, cl);
}
sch->q.qlen++;
- sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
htb_add_to_wait_tree(q, cl, diff);
}
- /* update byte stats except for leaves which are already updated */
- if (cl->level) {
- cl->bstats.bytes += bytes;
- cl->bstats.packets += skb_is_gso(skb)?
- skb_shinfo(skb)->gso_segs:1;
- }
+ /* update basic stats except for leaves which are already updated */
+ if (cl->level)
+ bstats_update(&cl->bstats, skb);
+
cl = cl->parent;
}
}
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a6844..bce1665239b 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
result = tc_classify(skb, p->filter_list, &res);
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
switch (result) {
case TC_ACT_SHOT:
result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d..21f13da2476 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a7..1c4bce86347 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- sch->bstats.bytes += qdisc_pkt_len(nskb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33c..966158d49dd 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0c..a6009c5a2c9 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c..239ec53a634 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot->allot = q->scaled_quantum;
}
if (++sch->q.qlen <= q->limit) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d6463..77565e72181 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->q.qlen++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94..af9360d1f6e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe67849269..67e31276682 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
return cred->cr_ops->crvalidate(task, p);
}
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+ encode(rqstp, &xdr, obj);
+}
+
int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
if (cred->cr_ops->crwrap_req)
return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
/* By default, we encode the arguments normally. */
- return encode(rqstp, data, obj);
+ rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+ return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+ return decode(rqstp, &xdr, obj);
}
int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
data, obj);
/* By default, we decode the arguments normally. */
- return decode(rqstp, data, obj);
+ return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
}
int
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce35e22..45dbf1521b9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@ out_bad:
return NULL;
}
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+ encode(rqstp, &xdr, obj);
+}
+
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
struct xdr_buf integ_buf;
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
if (xdr_buf_subsegment(snd_buf, &integ_buf,
offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@ out:
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
u32 offset;
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
status = alloc_enc_pages(rqstp);
if (status)
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
static int
gss_wrap_req(struct rpc_task *task,
- kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task,
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
return 0;
}
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ return decode(rqstp, &xdr, obj);
+}
static int
gss_unwrap_resp(struct rpc_task *task,
- kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+ kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task,
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
+ (savedlen - head->iov_len);
out_decode:
- status = decode(rqstp, p, obj);
+ status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0cc350..1dd1a689000 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req)
ret = task->tk_status;
rpc_put_task(task);
}
- return ret;
dprintk("RPC: bc_send ret= %d\n", ret);
+ return ret;
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f5146..57d344cf225 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@ static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t encode;
+ kxdreproc_t encode;
__be32 *p;
dprint_status(task);
@@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
+ kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@ out_overflow:
goto out_garbage;
}
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
- return 0;
}
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
return 0;
}
@@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
const struct rpc_task *task)
{
const char *rpc_waitq = "none";
- char *p, action[KSYM_SYMBOL_LEN];
if (RPC_IS_QUEUED(task))
rpc_waitq = rpc_qname(task->tk_waitqueue);
- /* map tk_action pointer to a function name; then trim off
- * the "+0x0 [sunrpc]" */
- sprint_symbol(action, (unsigned long)task->tk_action);
- p = strchr(action, '+');
- if (p)
- *p = '\0';
-
- printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+ printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
task->tk_pid, task->tk_flags, task->tk_status,
clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
- action, rpc_waitq);
+ task->tk_action, rpc_waitq);
}
void rpc_show_tasks(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f41e55..72bc5368396 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
{
struct inode *inode;
- BUG_ON(!d_unhashed(dentry));
+ d_drop(dentry);
inode = rpc_get_inode(dir->i_sb, mode);
if (!inode)
goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca2c85..c652e4cc9fe 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@ enum {
RPCBPROC_GETSTAT,
};
-#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
-
/*
* r_owner
*
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
* XDR functions for rpcbind
*/
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
- if (unlikely(p == NULL))
- return -EIO;
-
- *p++ = htonl(rpcb->r_prog);
- *p++ = htonl(rpcb->r_vers);
- *p++ = htonl(rpcb->r_prot);
- *p = htonl(rpcb->r_port);
-
- return 0;
+ p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p++ = cpu_to_be32(rpcb->r_vers);
+ *p++ = cpu_to_be32(rpcb->r_prot);
+ *p = cpu_to_be32(rpcb->r_port);
}
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
unsigned long port;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
rpcb->r_port = 0;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- port = ntohl(*p);
+ port = be32_to_cpup(p);
dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
task->tk_msg.rpc_proc->p_name, port);
if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
unsigned int *boolp)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
*boolp = 0;
- if (*p)
+ if (*p != xdr_zero)
*boolp = 1;
dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
- const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+ const u32 maxstrlen)
{
- u32 len;
__be32 *p;
+ u32 len;
- if (unlikely(string == NULL))
- return -EIO;
len = strlen(string);
- if (unlikely(len > maxstrlen))
- return -EIO;
-
- p = xdr_reserve_space(xdr, sizeof(__be32) + len);
- if (unlikely(p == NULL))
- return -EIO;
+ BUG_ON(len > maxstrlen);
+ p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
-
- return 0;
}
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers,
rpcb->r_netid, rpcb->r_addr);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr,
- sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
- if (unlikely(p == NULL))
- return -EIO;
- *p++ = htonl(rpcb->r_prog);
- *p = htonl(rpcb->r_vers);
-
- if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
- return -EIO;
+ p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p = cpu_to_be32(rpcb->r_vers);
- return 0;
+ encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+ encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+ encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
}
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct sockaddr_storage address;
struct sockaddr *sap = (struct sockaddr *)&address;
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
u32 len;
rpcb->r_port = 0;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_fail;
- len = ntohl(*p);
+ len = be32_to_cpup(p);
/*
* If the returned universal address is a null string,
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
if (unlikely(len > RPCBIND_MAXUADDRLEN))
goto out_fail;
- p = xdr_inline_decode(&xdr, len);
+ p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@ out_fail:
static struct rpc_procinfo rpcb_procedures2[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_GETPORT] = {
.p_proc = RPCBPROC_GETPORT,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_getport,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getport,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_getportres_sz,
.p_statidx = RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
static struct rpc_procinfo rpcb_procedures3[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
static struct rpc_procinfo rpcb_procedures4[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = {
static struct rpc_version rpcb_version2 = {
.number = RPCBVERS_2,
- .nrprocs = RPCB_HIGHPROC_2,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures2),
.procs = rpcb_procedures2
};
static struct rpc_version rpcb_version3 = {
.number = RPCBVERS_3,
- .nrprocs = RPCB_HIGHPROC_3,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures3),
.procs = rpcb_procedures3
};
static struct rpc_version rpcb_version4 = {
.number = RPCBVERS_4,
- .nrprocs = RPCB_HIGHPROC_4,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures4),
.procs = rpcb_procedures4
};
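
Replacing the hand-maintained RPCB_HIGHPROC_* counts with ARRAY_SIZE() means nrprocs can no longer drift out of sync with the procedure tables. A minimal standalone sketch of the idiom (the kernel's own macro in include/linux/kernel.h additionally type-checks its argument):

#include <stdio.h>

/* Same shape as the kernel macro, minus the __must_be_array() check. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct procinfo { int p_proc; };

static struct procinfo procs[] = {
	{ .p_proc = 0 },
	{ .p_proc = 3 },	/* sparse designated initializers still count */
};

int main(void)
{
	/* Stays correct when entries are added or removed above. */
	printf("nrprocs = %zu\n", ARRAY_SIZE(procs));
	return 0;
}
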
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42c494..0e659c665a8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv)
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
-#if defined(CONFIG_NFS_V4_1)
- svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -1147,7 +1143,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
dprintk("svc: svc_process dropit\n");
- svc_drop(rqstp);
return 0;
err_short_len:
@@ -1218,7 +1213,6 @@ svc_process(struct svc_rqst *rqstp)
struct kvec *resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
u32 dir;
- int error;
/*
* Setup response xdr_buf.
@@ -1246,11 +1240,13 @@ svc_process(struct svc_rqst *rqstp)
return 0;
}
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- return svc_send(rqstp);
+ /* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv)) {
+		return svc_send(rqstp);
+	} else {
+		svc_drop(rqstp);
+		return 0;
+	}
}
#if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1260,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
{
struct kvec *argv = &rqstp->rq_arg.head[0];
struct kvec *resv = &rqstp->rq_res.head[0];
- int error;
/* Build the svc_rqst used by the common processing routine */
- rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xprt = serv->sv_bc_xprt;
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
rqstp->rq_server = serv;
@@ -1292,12 +1287,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
svc_getu32(argv); /* XID */
svc_getnl(argv); /* CALLDIR */
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
- return bc_send(req);
+ /* Returns 1 for send, 0 for drop */
+ if (svc_process_common(rqstp, argv, resv)) {
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+ sizeof(req->rq_snd_buf));
+ return bc_send(req);
+ } else {
+ /* Nothing to do to drop request */
+ return 0;
+ }
}
EXPORT_SYMBOL(bc_svc_process);
#endif /* CONFIG_NFS_V4_1 */
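
svc_process_common() now reports only a send-vs-drop verdict, and each caller supplies its own transmit step: svc_send() on the fore channel, bc_send() on the back channel. A small userspace sketch of the same dispatch shape, using hypothetical process()/send_reply()/drop_req() stand-ins rather than the sunrpc API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for svc_process_common()/svc_send()/svc_drop(). */
static bool process(int req) { return req % 2 == 0; } /* 1 = send, 0 = drop */
static int send_reply(int req) { printf("send %d\n", req); return 0; }
static void drop_req(int req) { printf("drop %d\n", req); }

static int handle(int req)
{
	/* Returns 1 for send, 0 for drop - the caller picks the transport. */
	if (process(req))
		return send_reply(req);
	drop_req(req);
	return 0;
}

int main(void)
{
	handle(1);	/* dropped */
	handle(2);	/* sent */
	return 0;
}
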
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e16be3..d265aa700bb 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+ struct net *, struct sockaddr *,
+ int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];
@@ -1184,6 +1191,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+ struct net *net,
+ struct sockaddr *sa, int salen,
+ int flags)
+{
+ return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+ .xpo_create = svc_bc_tcp_create,
+ .xpo_detach = svc_bc_tcp_sock_detach,
+ .xpo_free = svc_bc_sock_free,
+ .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+ .xcl_name = "tcp-bc",
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_tcp_bc_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+ svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+ svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1265,14 @@ void svc_init_xprt_sock(void)
{
svc_reg_xprt_class(&svc_tcp_class);
svc_reg_xprt_class(&svc_udp_class);
+ svc_init_bc_xprt_sock();
}
void svc_cleanup_xprt_sock(void)
{
svc_unreg_xprt_class(&svc_tcp_class);
svc_unreg_xprt_class(&svc_udp_class);
+ svc_cleanup_bc_xprt_sock();
}
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1569,45 @@ static void svc_sock_free(struct svc_xprt *xprt)
kfree(svsk);
}
+#if defined(CONFIG_NFS_V4_1)
/*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
*/
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+ int protocol,
+ struct net *net,
+ struct sockaddr *sin, int len,
+ int flags)
{
struct svc_sock *svsk;
- struct svc_xprt *xprt = NULL;
+ struct svc_xprt *xprt;
+
+ if (protocol != IPPROTO_TCP) {
+ printk(KERN_WARNING "svc: only TCP sockets"
+ " supported on shared back channel\n");
+ return ERR_PTR(-EINVAL);
+ }
- dprintk("svc: %s\n", __func__);
svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
if (!svsk)
- goto out;
+ return ERR_PTR(-ENOMEM);
xprt = &svsk->sk_xprt;
- if (prot == IPPROTO_TCP)
- svc_xprt_init(&svc_tcp_class, xprt, serv);
- else if (prot == IPPROTO_UDP)
- svc_xprt_init(&svc_udp_class, xprt, serv);
- else
- BUG();
-out:
- dprintk("svc: %s return %p\n", __func__, xprt);
+ svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+ serv->sv_bc_xprt = xprt;
+
return xprt;
}
-EXPORT_SYMBOL_GPL(svc_sock_create);
/*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
*/
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
{
- if (xprt)
+ if (xprt) {
+ kfree(xprt->xpt_bc_sid);
kfree(container_of(xprt, struct svc_sock, sk_xprt));
+ }
}
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
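
svc_init_xprt_sock() can call the back-channel setup unconditionally because the CONFIG_NFS_V4_1=n build supplies empty stubs. A reduced sketch of that stub pattern (HAVE_BACKCHANNEL is a made-up switch standing in for the Kconfig symbol):

#include <stdio.h>

#define HAVE_BACKCHANNEL 1	/* flip to 0 to model CONFIG_NFS_V4_1=n */

#if HAVE_BACKCHANNEL
static void init_bc(void)    { printf("register tcp-bc class\n"); }
static void cleanup_bc(void) { printf("unregister tcp-bc class\n"); }
#else
/* Empty stubs keep the callers free of #ifdefs. */
static void init_bc(void)    { }
static void cleanup_bc(void) { }
#endif

int main(void)
{
	init_bc();	/* called unconditionally, like svc_init_xprt_sock() */
	cleanup_bc();
	return 0;
}
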
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e749..679cd674b81 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+ __be32 *p, unsigned int len)
+{
+ if (len > iov->iov_len)
+ len = iov->iov_len;
+ if (p == NULL)
+ p = (__be32*)iov->iov_base;
+ xdr->p = p;
+ xdr->end = (__be32*)(iov->iov_base + len);
+ xdr->iov = iov;
+ xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+ unsigned int base, unsigned int len)
+{
+ unsigned int pgnr;
+ unsigned int maxlen;
+ unsigned int pgoff;
+ unsigned int pgend;
+ void *kaddr;
+
+ maxlen = xdr->buf->page_len;
+ if (base >= maxlen)
+ return -EINVAL;
+ maxlen -= base;
+ if (len > maxlen)
+ len = maxlen;
+
+ base += xdr->buf->page_base;
+
+ pgnr = base >> PAGE_SHIFT;
+ xdr->page_ptr = &xdr->buf->pages[pgnr];
+ kaddr = page_address(*xdr->page_ptr);
+
+ pgoff = base & ~PAGE_MASK;
+ xdr->p = (__be32*)(kaddr + pgoff);
+
+ pgend = pgoff + len;
+ if (pgend > PAGE_SIZE)
+ pgend = PAGE_SIZE;
+ xdr->end = (__be32*)(kaddr + pgend);
+ xdr->iov = NULL;
+ return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+ unsigned int newbase;
+
+ newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+ newbase -= xdr->buf->page_base;
+
+ if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+ if (xdr->page_ptr != NULL)
+ xdr_set_next_page(xdr);
+ else if (xdr->iov == xdr->buf->head) {
+ if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ }
+ return xdr->p != xdr->end;
+}
+
/**
* xdr_init_decode - Initialize an xdr_stream for decoding data.
* @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
- struct kvec *iov = buf->head;
- unsigned int len = iov->iov_len;
-
- if (len > buf->len)
- len = buf->len;
xdr->buf = buf;
- xdr->iov = iov;
- xdr->p = p;
- xdr->end = (__be32 *)((char *)iov->iov_base + len);
+ xdr->scratch.iov_base = NULL;
+ xdr->scratch.iov_len = 0;
+ if (buf->head[0].iov_len != 0)
+ xdr_set_iov(xdr, buf->head, p, buf->len);
+ else if (buf->page_len != 0)
+ xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
__be32 *q = p + XDR_QUADLEN(nbytes);
if (unlikely(q > xdr->end || q < p))
return NULL;
+ xdr->p = q;
return p;
}
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
/**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+ xdr->scratch.iov_base = buf;
+ xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p;
+ void *cpdest = xdr->scratch.iov_base;
+ size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+ if (nbytes > xdr->scratch.iov_len)
+ return NULL;
+ memcpy(cpdest, xdr->p, cplen);
+ cpdest += cplen;
+ nbytes -= cplen;
+ if (!xdr_set_next_buffer(xdr))
+ return NULL;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p == NULL)
+ return NULL;
+ memcpy(cpdest, p, nbytes);
+ return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
* @xdr: pointer to xdr_stream struct
* @nbytes: number of bytes of data to decode
*
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
*/
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
- __be32 *p = xdr->p;
- __be32 *q = p + XDR_QUADLEN(nbytes);
+ __be32 *p;
- if (unlikely(q > xdr->end || q < p))
+ if (nbytes == 0)
+ return xdr->p;
+ if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return NULL;
- xdr->p = q;
- return p;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p != NULL)
+ return p;
+ return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
- char * kaddr = page_address(xdr->buf->pages[0]);
xdr_read_pages(xdr, len);
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
- if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
- len = PAGE_CACHE_SIZE - xdr->buf->page_base;
- xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
- xdr->end = (__be32 *)((char *)xdr->p + len);
+ xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
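
The reworked decode path walks head, then pages, then tail, and xdr_set_scratch_buffer() lets xdr_inline_decode() return a linear pointer even when an object straddles a segment boundary: the straddling bytes are copied into the caller's scratch area. A toy userspace model of that copy-across-boundary step, with a hypothetical two-segment stream type (not the kernel structs):

#include <stdio.h>
#include <string.h>

/* Hypothetical two-segment stream modelling head + first page. */
struct stream {
	const char *seg[2];
	size_t len[2];
	int cur;		/* current segment */
	size_t pos;		/* offset within it */
	char scratch[16];	/* caller-supplied in the kernel version */
};

/* Return a linear pointer to n bytes, copying via scratch if they straddle. */
static const char *inline_decode(struct stream *s, size_t n)
{
	size_t left = s->len[s->cur] - s->pos;

	if (n <= left) {			/* fast path: no copy */
		const char *p = s->seg[s->cur] + s->pos;
		s->pos += n;
		return p;
	}
	if (n > sizeof(s->scratch) || s->cur == 1)
		return NULL;			/* scratch too small, or stream exhausted */
	memcpy(s->scratch, s->seg[0] + s->pos, left);
	memcpy(s->scratch + left, s->seg[1], n - left);
	s->cur = 1;
	s->pos = n - left;
	return s->scratch;
}

int main(void)
{
	struct stream s = { { "abcd", "efgh" }, { 4, 4 }, 0, 2, "" };
	const char *p = inline_decode(&s, 4);	/* spans "cd" + "ef" */

	printf("%.4s\n", p);			/* -> cdef */
	return 0;
}
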
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb88951091..d5e1e0b0889 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
+#include <net/ah.h>
#include <asm/uaccess.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
- if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+ if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+ ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
return -EINVAL;
*props = algo->desc.sadb_alg_id;
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
- (nlh->nlmsg_flags & NLM_F_DUMP)) {
+ (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
if (link->dump == NULL)
return -EINVAL;
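
Since NLM_F_DUMP is the two-bit mask NLM_F_ROOT | NLM_F_MATCH, `nlmsg_flags & NLM_F_DUMP` is true when either bit is set; the stricter `(flags & NLM_F_DUMP) == NLM_F_DUMP` demands both. In miniature:

#include <stdio.h>

#define NLM_F_ROOT  0x100
#define NLM_F_MATCH 0x200
#define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
	unsigned int flags = NLM_F_ROOT;	/* only one of the two bits */

	/* Old test: wrongly treats a half-set mask as a dump request. */
	printf("loose:  %d\n", (flags & NLM_F_DUMP) != 0);		/* 1 */
	/* New test: both bits must be present. */
	printf("strict: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP);	/* 0 */
	return 0;
}
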
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1b9b13ee2a7..2b5387d53ba 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -227,7 +227,7 @@ ifndef PERF_DEBUG
CFLAGS_OPTIMIZE = -O6
endif
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bc04903548..7069bd3e90b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -331,6 +331,9 @@ try_again:
else if (err == ENODEV && cpu_list) {
die("No such device - did you specify"
" an out-of-range profile CPU?\n");
+ } else if (err == ENOENT) {
+ die("%s event is not supported. ",
+ event_name(evsel));
} else if (err == EINVAL && sample_id_all_avail) {
/*
* Old kernel, no attr->sample_id_type_all field
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a4ebeb8b01..abd4b8497bc 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -489,7 +489,8 @@ static void create_tasks(void)
err = pthread_attr_init(&attr);
BUG_ON(err);
- err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+ err = pthread_attr_setstacksize(&attr,
+ (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
BUG_ON(err);
err = pthread_mutex_lock(&start_work_mutex);
BUG_ON(err);
@@ -1861,7 +1862,7 @@ static int __cmd_record(int argc, const char **argv)
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- if (rec_argv)
+ if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
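
On configurations where PTHREAD_STACK_MIN exceeds 16 KiB, the old fixed size made pthread_attr_setstacksize() fail with EINVAL; clamping to the platform minimum keeps the call valid everywhere. A minimal sketch:

#include <limits.h>	/* PTHREAD_STACK_MIN */
#include <pthread.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	pthread_attr_t attr;
	size_t sz = (size_t) max(16 * 1024, PTHREAD_STACK_MIN);
	int err;

	pthread_attr_init(&attr);
	/* Never below the platform floor, so this cannot fail with EINVAL. */
	err = pthread_attr_setstacksize(&attr, sz);
	if (err)
		fprintf(stderr, "setstacksize: %d\n", err);
	printf("stack size = %zu\n", sz);
	pthread_attr_destroy(&attr);
	return 0;
}
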
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 02b2d8013a6..c385a63ebfd 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -316,6 +316,8 @@ static int run_perf_stat(int argc __used, const char **argv)
"\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid or running as root.",
system_wide ? "system-wide " : "");
+ } else if (errno == ENOENT) {
+ error("%s event is not supported. ", event_name(counter));
} else {
error("open_counter returned with %d (%s). "
"/bin/dmesg may provide additional information.\n",
@@ -683,8 +685,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
nr_counters = ARRAY_SIZE(default_attrs);
for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
- pos = perf_evsel__new(default_attrs[c].type,
- default_attrs[c].config,
+ pos = perf_evsel__new(&default_attrs[c],
nr_counters);
if (pos == NULL)
goto out;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c984342a57..ed5696198d3 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@ out:
return err;
}
+#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
@@ -264,6 +265,7 @@ static int test__open_syscall_event(void)
int err = -1, fd;
struct thread_map *threads;
struct perf_evsel *evsel;
+ struct perf_event_attr attr;
unsigned int nr_open_calls = 111, i;
int id = trace_event__id("sys_enter_open");
@@ -278,7 +280,10 @@ static int test__open_syscall_event(void)
return -1;
}
- evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+ memset(&attr, 0, sizeof(attr));
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.config = id;
+ evsel = perf_evsel__new(&attr, 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
goto out_thread_map_delete;
@@ -317,6 +322,111 @@ out_thread_map_delete:
return err;
}
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+ int err = -1, fd, cpu;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr;
+ unsigned int nr_open_calls = 111, i;
+ cpu_set_t *cpu_set;
+ size_t cpu_set_size;
+ int id = trace_event__id("sys_enter_open");
+
+ if (id < 0) {
+ pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+ return -1;
+ }
+
+ threads = thread_map__new(-1, getpid());
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+ cpu_set = CPU_ALLOC(cpus->nr);
+
+ if (cpu_set == NULL)
+ goto out_thread_map_delete;
+
+ cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+ CPU_ZERO_S(cpu_set_size, cpu_set);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.config = id;
+ evsel = perf_evsel__new(&attr, 0);
+ if (evsel == NULL) {
+ pr_debug("perf_evsel__new\n");
+ goto out_cpu_free;
+ }
+
+ if (perf_evsel__open(evsel, cpus, threads) < 0) {
+ pr_debug("failed to open counter: %s, "
+ "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+ strerror(errno));
+ goto out_evsel_delete;
+ }
+
+ for (cpu = 0; cpu < cpus->nr; ++cpu) {
+ unsigned int ncalls = nr_open_calls + cpu;
+
+ CPU_SET(cpu, cpu_set);
+ sched_setaffinity(0, cpu_set_size, cpu_set);
+ for (i = 0; i < ncalls; ++i) {
+ fd = open("/etc/passwd", O_RDONLY);
+ close(fd);
+ }
+ CPU_CLR(cpu, cpu_set);
+ }
+
+ /*
+	 * Here we need to explicitly preallocate the counts: the
+	 * auto allocation would size them for just one cpu, since
+	 * we start with cpu 0.
+ */
+ if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+ pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+ goto out_close_fd;
+ }
+
+ for (cpu = 0; cpu < cpus->nr; ++cpu) {
+ unsigned int expected;
+
+ if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+ pr_debug("perf_evsel__open_read_on_cpu\n");
+ goto out_close_fd;
+ }
+
+ expected = nr_open_calls + cpu;
+ if (evsel->counts->cpu[cpu].val != expected) {
+ pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+ expected, cpu, evsel->counts->cpu[cpu].val);
+ goto out_close_fd;
+ }
+ }
+
+ err = 0;
+out_close_fd:
+ perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+ perf_evsel__delete(evsel);
+out_cpu_free:
+ CPU_FREE(cpu_set);
+out_thread_map_delete:
+ thread_map__delete(threads);
+ return err;
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -330,6 +440,10 @@ static struct test {
.func = test__open_syscall_event,
},
{
+ .desc = "detect open syscall event on all cpus",
+ .func = test__open_syscall_event_on_all_cpus,
+ },
+ {
.func = NULL,
},
};
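
The new test pins itself to each CPU in turn using a dynamically sized cpu_set_t, so it still works on machines with more than CPU_SETSIZE processors. A standalone sketch of that affinity loop (using the _S macro variants throughout, which are the documented companions of CPU_ALLOC()):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t size = CPU_ALLOC_SIZE(ncpus);
	int cpu;

	if (set == NULL)
		return 1;
	for (cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO_S(size, set);
		CPU_SET_S(cpu, size, set);
		if (sched_setaffinity(0, size, set) == 0)
			printf("now running on cpu %d\n", cpu);
		/* ... do per-cpu work here, as the test does with open() ... */
	}
	CPU_FREE(set);
	return 0;
}
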
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1e67ab9c7eb..6ce4042421b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,6 +1247,8 @@ try_again:
die("Permission error - are you root?\n"
"\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid.\n");
+ if (err == ENOENT)
+ die("%s event is not supported. ", event_name(evsel));
/*
* If it's cycles then fall back to hrtimer
* based cpu-clock-tick sw counter, which
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c95267e63c5..f5cfed60af9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -6,14 +6,13 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
if (evsel != NULL) {
evsel->idx = idx;
- evsel->attr.type = type;
- evsel->attr.config = config;
+ evsel->attr = *attr;
INIT_LIST_HEAD(&evsel->node);
}
@@ -128,59 +127,75 @@ int __perf_evsel__read(struct perf_evsel *evsel,
return 0;
}
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads)
{
- int cpu;
+ int cpu, thread;
- if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+ if (evsel->fd == NULL &&
+ perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
return -1;
for (cpu = 0; cpu < cpus->nr; cpu++) {
- FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
- cpus->map[cpu], -1, 0);
- if (FD(evsel, cpu, 0) < 0)
- goto out_close;
+ for (thread = 0; thread < threads->nr; thread++) {
+ FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+ threads->map[thread],
+ cpus->map[cpu], -1, 0);
+ if (FD(evsel, cpu, thread) < 0)
+ goto out_close;
+ }
}
return 0;
out_close:
- while (--cpu >= 0) {
- close(FD(evsel, cpu, 0));
- FD(evsel, cpu, 0) = -1;
- }
+ do {
+ while (--thread >= 0) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+ thread = threads->nr;
+ } while (--cpu >= 0);
return -1;
}
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+static struct {
+ struct cpu_map map;
+ int cpus[1];
+} empty_cpu_map = {
+ .map.nr = 1,
+ .cpus = { -1, },
+};
+
+static struct {
+ struct thread_map map;
+ int threads[1];
+} empty_thread_map = {
+ .map.nr = 1,
+ .threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+ struct cpu_map *cpus, struct thread_map *threads)
{
- int thread;
-
- if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
- return -1;
- for (thread = 0; thread < threads->nr; thread++) {
- FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
- threads->map[thread], -1, -1, 0);
- if (FD(evsel, 0, thread) < 0)
- goto out_close;
+ if (cpus == NULL) {
+ /* Work around old compiler warnings about strict aliasing */
+ cpus = &empty_cpu_map.map;
}
- return 0;
+ if (threads == NULL)
+ threads = &empty_thread_map.map;
-out_close:
- while (--thread >= 0) {
- close(FD(evsel, 0, thread));
- FD(evsel, 0, thread) = -1;
- }
- return -1;
+ return __perf_evsel__open(evsel, cpus, threads);
}
-int perf_evsel__open(struct perf_evsel *evsel,
- struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
- if (threads == NULL)
- return perf_evsel__open_per_cpu(evsel, cpus);
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
- return perf_evsel__open_per_thread(evsel, threads);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+{
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
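
The per-cpu and per-thread open paths now funnel into one helper by passing dummy single-entry maps. Because cpu_map and thread_map end in a flexible array member they cannot be initialized as plain static objects, so each dummy wraps the map plus one slot of storage in an anonymous container struct. A reduced sketch of that trick, with a hypothetical cpu_map shape (embedding a struct that has a flexible array member is a GCC extension, which is what the "strict aliasing" comment in the patch alludes to):

#include <stdio.h>

/* Hypothetical shape of perf's cpu_map: trailing flexible array. */
struct cpu_map {
	int nr;
	int map[];	/* no storage of its own in a static object */
};

static struct {
	struct cpu_map map;
	int cpus[1];	/* provides storage right after the header */
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1 },	/* -1 = "any cpu" for sys_perf_event_open() */
};

int main(void)
{
	struct cpu_map *cpus = &empty_cpu_map.map;

	/* map[0] aliases cpus[0] - works in practice, as perf relies on. */
	printf("nr=%d map[0]=%d\n", cpus->nr, cpus->map[0]);
	return 0;
}
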
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a0ccd69c3fc..b2d755fe88a 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -37,7 +37,7 @@ struct perf_evsel {
struct cpu_map;
struct thread_map;
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__delete(struct perf_evsel *evsel);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 649083f27e0..5cb6f4bde90 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -490,6 +490,31 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
return EVT_HANDLED_ALL;
}
+static int store_event_type(const char *orgname)
+{
+ char filename[PATH_MAX], *c;
+ FILE *file;
+ int id, n;
+
+ sprintf(filename, "%s/", debugfs_path);
+ strncat(filename, orgname, strlen(orgname));
+ strcat(filename, "/id");
+
+ c = strchr(filename, ':');
+ if (c)
+ *c = '/';
+
+ file = fopen(filename, "r");
+ if (!file)
+ return 0;
+ n = fscanf(file, "%i", &id);
+ fclose(file);
+ if (n < 1) {
+ pr_err("cannot store event ID\n");
+ return -EINVAL;
+ }
+ return perf_header__push_event(id, orgname);
+}
static enum event_result parse_tracepoint_event(const char **strp,
struct perf_event_attr *attr)
@@ -533,9 +558,13 @@ static enum event_result parse_tracepoint_event(const char **strp,
*strp += strlen(sys_name) + evt_length;
return parse_multiple_tracepoint_event(sys_name, evt_name,
flags);
- } else
+ } else {
+ if (store_event_type(evt_name) < 0)
+ return EVT_FAILED;
+
return parse_single_tracepoint_event(sys_name, evt_name,
evt_length, attr, strp);
+ }
}
static enum event_result
@@ -778,41 +807,11 @@ modifier:
return ret;
}
-static int store_event_type(const char *orgname)
-{
- char filename[PATH_MAX], *c;
- FILE *file;
- int id, n;
-
- sprintf(filename, "%s/", debugfs_path);
- strncat(filename, orgname, strlen(orgname));
- strcat(filename, "/id");
-
- c = strchr(filename, ':');
- if (c)
- *c = '/';
-
- file = fopen(filename, "r");
- if (!file)
- return 0;
- n = fscanf(file, "%i", &id);
- fclose(file);
- if (n < 1) {
- pr_err("cannot store event ID\n");
- return -EINVAL;
- }
- return perf_header__push_event(id, orgname);
-}
-
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
struct perf_event_attr attr;
enum event_result ret;
- if (strchr(str, ':'))
- if (store_event_type(str) < 0)
- return -1;
-
for (;;) {
memset(&attr, 0, sizeof(attr));
ret = parse_event_symbols(&str, &attr);
@@ -824,7 +823,7 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
if (ret != EVT_HANDLED_ALL) {
struct perf_evsel *evsel;
- evsel = perf_evsel__new(attr.type, attr.config,
+ evsel = perf_evsel__new(&attr,
nr_counters);
if (evsel == NULL)
return -1;
@@ -1014,8 +1013,15 @@ void print_events(void)
int perf_evsel_list__create_default(void)
{
- struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
- PERF_COUNT_HW_CPU_CYCLES, 0);
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+
+ evsel = perf_evsel__new(&attr, 0);
+
if (evsel == NULL)
return -ENOMEM;
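
store_event_type() resolves a "sys:event" string to its numeric tracepoint id by reading the event's debugfs id file, with the ':' rewritten to a '/'. A standalone sketch of the lookup, assuming the usual debugfs mount point; using one bounded snprintf() also avoids the unchecked sprintf()/strncat() buffer handling of the original:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[4096];
	const char *ev = "syscalls:sys_enter_open";	/* example event */
	char *c;
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/id", ev);
	c = strchr(path, ':');
	if (c)
		*c = '/';	/* "sys:event" -> "sys/event" */
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%i", &id) != 1)
			id = -1;
		fclose(f);
	}
	printf("%s -> id %d\n", ev, id);
	return 0;
}
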
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6fb4694d05f..313dac2d94c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1007,7 +1007,7 @@ more:
if (size == 0)
size = 8;
- if (head + event->header.size >= mmap_size) {
+ if (head + event->header.size > mmap_size) {
if (mmaps[map_idx]) {
munmap(mmaps[map_idx], mmap_size);
mmaps[map_idx] = NULL;